Merge tag 'omap-for-v3.8/fixes-for-merge-window-v4-signed' of git://git.kernel.org...
author     Olof Johansson <olof@lixom.net>
           Tue, 18 Dec 2012 02:39:47 +0000 (18:39 -0800)
committer  Olof Johansson <olof@lixom.net>
           Tue, 18 Dec 2012 02:39:47 +0000 (18:39 -0800)
From Tony Lindgren:

These patches fix a build error caused by a merge
conflict with the fb code, a few timer warnings, and
longer-term regressions for tfp410 and OMAP H4 ethernet.
Also included is a GPIO mode fix for the legacy mux code.

* tag 'omap-for-v3.8/fixes-for-merge-window-v4-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: OMAP2+: common: remove use of vram
  ARM: OMAP: Move plat/omap-serial.h to include/linux/platform_data/serial-omap.h
  ARM: dts: Add build target for omap4-panda-a4
  ARM: dts: OMAP2420: Correct H4 board memory size
  mfd: omap-usb-host: get rid of cpu_is_omap..() macros
  ARM: OMAP: Remove debug-devices.c
  ARM: OMAP2420: Fix ethernet support for OMAP2420 H4
  OMAP2+: mux: Fixed gpio mux mode analysis
  OMAP: board-files: fix i2c_bus for tfp410
  ARM: OMAP2+: Fix sparse warnings in timer.c
  ARM: AM335x: Fix warning in timer.c
  ARM: OMAP2+: Fix realtime_counter_init warning in timer.c

698 files changed:
Documentation/DMA-attributes.txt
Documentation/DocBook/drm.tmpl
Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/stmpe.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/tps65217.txt
Documentation/filesystems/ext4.txt
Documentation/i2c/smbus-protocol
Documentation/kernel-parameters.txt
Documentation/kref.txt
Documentation/prctl/seccomp_filter.txt
Documentation/security/keys.txt
MAINTAINERS
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/sun4i-cubieboard.dts
arch/arm/boot/dts/sun5i-olinuxino.dts
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-exynos/common.h
arch/arm/mach-omap2/cclock44xx_data.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clockdomain.c
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm/mach-tegra/common.c
arch/arm/mach-tegra/tegra30_clocks.c
arch/arm/mach-u300/core.c
arch/arm/mach-ux500/devices-db8500.h
arch/arm/mm/dma-mapping.c
arch/m68k/Kconfig.cpu
arch/m68k/Makefile
arch/m68k/include/asm/m5249sim.h [deleted file]
arch/m68k/include/asm/m525xsim.h
arch/m68k/include/asm/mcfclk.h
arch/m68k/include/asm/mcfsim.h
arch/m68k/include/asm/page_no.h
arch/m68k/lib/memcpy.c
arch/m68k/platform/68000/Makefile [new file with mode: 0644]
arch/m68k/platform/68000/bootlogo-vz.h [moved from arch/m68k/platform/68VZ328/bootlogo.h with 100% similarity]
arch/m68k/platform/68000/bootlogo.h [moved from arch/m68k/platform/68328/bootlogo.h with 100% similarity]
arch/m68k/platform/68000/entry.S [moved from arch/m68k/platform/68328/entry.S with 100% similarity]
arch/m68k/platform/68000/head.S [new file with mode: 0644]
arch/m68k/platform/68000/ints.c [moved from arch/m68k/platform/68328/ints.c with 98% similarity]
arch/m68k/platform/68000/m68328.c [moved from arch/m68k/platform/68328/config.c with 97% similarity]
arch/m68k/platform/68000/m68EZ328.c [moved from arch/m68k/platform/68EZ328/config.c with 97% similarity]
arch/m68k/platform/68000/m68VZ328.c [moved from arch/m68k/platform/68VZ328/config.c with 98% similarity]
arch/m68k/platform/68000/romvec.S [moved from arch/m68k/platform/68328/romvec.S with 94% similarity]
arch/m68k/platform/68000/timers.c [moved from arch/m68k/platform/68328/timers.c with 98% similarity]
arch/m68k/platform/68328/Makefile [deleted file]
arch/m68k/platform/68328/head-de2.S [deleted file]
arch/m68k/platform/68328/head-pilot.S [deleted file]
arch/m68k/platform/68328/head-ram.S [deleted file]
arch/m68k/platform/68328/head-rom.S [deleted file]
arch/m68k/platform/68EZ328/Makefile [deleted file]
arch/m68k/platform/68VZ328/Makefile [deleted file]
arch/m68k/platform/coldfire/clk.c
arch/m68k/platform/coldfire/intc-5249.c
arch/m68k/platform/coldfire/m5206.c
arch/m68k/platform/coldfire/m523x.c
arch/m68k/platform/coldfire/m5249.c
arch/m68k/platform/coldfire/m525x.c
arch/m68k/platform/coldfire/m5272.c
arch/m68k/platform/coldfire/m527x.c
arch/m68k/platform/coldfire/m528x.c
arch/m68k/platform/coldfire/m5307.c
arch/m68k/platform/coldfire/m5407.c
arch/m68k/platform/coldfire/m54xx.c
arch/sh/mm/Kconfig
arch/x86/Kconfig
arch/x86/include/asm/efi.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/tboot.c
arch/x86/kernel/vsyscall_64.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
block/blk-cgroup.c
block/blk-core.c
block/blk-exec.c
block/blk-lib.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/bsg-lib.c
block/cfq-iosched.c
block/deadline-iosched.c
block/elevator.c
block/genhd.c
block/partitions/Kconfig
drivers/amba/tegra-ahb.c
drivers/bus/Kconfig
drivers/char/agp/intel-agp.h
drivers/char/agp/intel-gtt.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_ibmvtpm.h
drivers/extcon/extcon-arizona.c
drivers/extcon/extcon-class.c
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max8997.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-tps6586x.c
drivers/gpio/gpio-twl4030.c
drivers/gpio/gpio-viperboard.c [new file with mode: 0644]
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_helper.c [moved from drivers/gpu/drm/drm_dp_i2c_helper.c with 58% similarity]
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_hashtab.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/Makefile
drivers/gpu/drm/exynos/exynos_ddc.c
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_buf.h
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_encoder.c
drivers/gpu/drm/exynos/exynos_drm_encoder.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_gsc.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_hdmi.c
drivers/gpu/drm/exynos/exynos_drm_hdmi.h
drivers/gpu/drm/exynos/exynos_drm_iommu.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_iommu.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_ipp.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_ipp.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_rotator.h [new file with mode: 0644]
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_hdmiphy.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-fimc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-gsc.h [new file with mode: 0644]
drivers/gpu/drm/exynos/regs-hdmi.h
drivers/gpu/drm/exynos/regs-rotator.h [new file with mode: 0644]
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/cdv_intel_hdmi.c
drivers/gpu/drm/gma500/cdv_intel_lvds.c
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/mdfld_intel_display.c
drivers/gpu/drm/gma500/oaktrail.h
drivers/gpu/drm/gma500/oaktrail_crtc.c
drivers/gpu/drm/gma500/oaktrail_device.c
drivers/gpu/drm/gma500/oaktrail_hdmi.c
drivers/gpu/drm/gma500/oaktrail_lvds.c
drivers/gpu/drm/gma500/psb_intel_lvds.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/core/engctx.c
drivers/gpu/drm/nouveau/core/core/falcon.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/core/gpuobj.c
drivers/gpu/drm/nouveau/core/core/mm.c
drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv84.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nv94.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nva0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nva3.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/fifo/base.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/regs.h
drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/software/nv04.c
drivers/gpu/drm/nouveau/core/engine/software/nv10.c
drivers/gpu/drm/nouveau/core/engine/software/nv50.c
drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/engine/vp/nve0.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/core/class.h
drivers/gpu/drm/nouveau/core/include/core/engctx.h
drivers/gpu/drm/nouveau/core/include/core/falcon.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
drivers/gpu/drm/nouveau/core/include/core/mm.h
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/include/core/parent.h
drivers/gpu/drm/nouveau/core/include/engine/bsp.h
drivers/gpu/drm/nouveau/core/include/engine/copy.h
drivers/gpu/drm/nouveau/core/include/engine/crypt.h
drivers/gpu/drm/nouveau/core/include/engine/disp.h
drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
drivers/gpu/drm/nouveau/core/include/engine/fifo.h
drivers/gpu/drm/nouveau/core/include/engine/ppp.h
drivers/gpu/drm/nouveau/core/include/engine/vp.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
drivers/gpu/drm/nouveau/core/include/subdev/fb.h
drivers/gpu/drm/nouveau/core/subdev/bar/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/base.c
drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
drivers/gpu/drm/nouveau/core/subdev/bios/disp.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
drivers/gpu/drm/nouveau/core/subdev/bios/init.c
drivers/gpu/drm/nouveau/core/subdev/device/base.c
drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/base.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_acpi.h
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_connector.h
drivers/gpu/drm/nouveau/nouveau_crtc.h
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_dp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drm.h
drivers/gpu/drm/nouveau/nouveau_encoder.h
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_hdmi.c [deleted file]
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv04_crtc.c
drivers/gpu/drm/nouveau/nv04_display.c
drivers/gpu/drm/nouveau/nv10_fence.c
drivers/gpu/drm/nouveau/nv17_tv.c
drivers/gpu/drm/nouveau/nv50_crtc.c [deleted file]
drivers/gpu/drm/nouveau/nv50_cursor.c [deleted file]
drivers/gpu/drm/nouveau/nv50_dac.c [deleted file]
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_display.h
drivers/gpu/drm/nouveau/nv50_evo.c [deleted file]
drivers/gpu/drm/nouveau/nv50_evo.h [deleted file]
drivers/gpu/drm/nouveau/nv50_fence.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/nouveau/nv50_sor.c [deleted file]
drivers/gpu/drm/nouveau/nvc0_fence.c
drivers/gpu/drm/nouveau/nvd0_display.c [deleted file]
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cp.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/r600_reg.h
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cp.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_drv.h
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/tegra/Kconfig [new file with mode: 0644]
drivers/gpu/drm/tegra/Makefile [new file with mode: 0644]
drivers/gpu/drm/tegra/dc.c [new file with mode: 0644]
drivers/gpu/drm/tegra/dc.h [new file with mode: 0644]
drivers/gpu/drm/tegra/drm.c [new file with mode: 0644]
drivers/gpu/drm/tegra/drm.h [new file with mode: 0644]
drivers/gpu/drm/tegra/fb.c [new file with mode: 0644]
drivers/gpu/drm/tegra/hdmi.c [new file with mode: 0644]
drivers/gpu/drm/tegra/hdmi.h [new file with mode: 0644]
drivers/gpu/drm/tegra/host1x.c [new file with mode: 0644]
drivers/gpu/drm/tegra/output.c [new file with mode: 0644]
drivers/gpu/drm/tegra/rgb.c [new file with mode: 0644]
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c [new file with mode: 0644]
drivers/gpu/vga/vga_switcheroo.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-viperboard.c [new file with mode: 0644]
drivers/iio/adc/Kconfig
drivers/iio/adc/Makefile
drivers/iio/adc/ti_am335x_adc.c [new file with mode: 0644]
drivers/iio/adc/viperboard_adc.c [new file with mode: 0644]
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/keyboard/Kconfig
drivers/input/misc/da9052_onkey.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/da9052_tsi.c
drivers/input/touchscreen/ti_am335x_tsc.c [new file with mode: 0644]
drivers/input/touchscreen/ti_tscadc.c [deleted file]
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab8500-core.c
drivers/mfd/arizona-core.c
drivers/mfd/arizona-irq.c
drivers/mfd/as3711.c [new file with mode: 0644]
drivers/mfd/da9052-core.c
drivers/mfd/da9052-irq.c [new file with mode: 0644]
drivers/mfd/db8500-prcmu.c
drivers/mfd/jz4740-adc.c
drivers/mfd/lpc_ich.c
drivers/mfd/mc13xxx-core.c
drivers/mfd/mc13xxx-i2c.c
drivers/mfd/mc13xxx-spi.c
drivers/mfd/mc13xxx.h
drivers/mfd/mfd-core.c
drivers/mfd/rc5t583-irq.c
drivers/mfd/retu-mfd.c [new file with mode: 0644]
drivers/mfd/rtsx_pcr.c
drivers/mfd/sec-irq.c
drivers/mfd/sta2x11-mfd.c
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe.c
drivers/mfd/ti_am335x_tscadc.c [new file with mode: 0644]
drivers/mfd/tps6507x.c
drivers/mfd/tps65090.c
drivers/mfd/tps65217.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910-irq.c [deleted file]
drivers/mfd/tps65910.c
drivers/mfd/tps80031.c [new file with mode: 0644]
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/twl4030-madc.c
drivers/mfd/twl4030-power.c
drivers/mfd/twl6030-irq.c
drivers/mfd/twl6040-irq.c [deleted file]
drivers/mfd/twl6040.c [moved from drivers/mfd/twl6040-core.c with 84% similarity]
drivers/mfd/viperboard.c [new file with mode: 0644]
drivers/mfd/wm5102-tables.c
drivers/mfd/wm8994-core.c
drivers/mmc/host/Makefile
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/power/da9052-battery.c
drivers/rtc/rtc-twl.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_lib.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tq.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/Kconfig
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_alua.h
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_fabric_lib.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_hba.c
drivers/target/target_core_iblock.c
drivers/target/target_core_iblock.h
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_pscsi.c
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_rd.h
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_stat.c
drivers/target/target_core_tmr.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_ua.c
drivers/target/target_core_ua.h
drivers/target/tcm_fc/tfc_sess.c
drivers/usb/phy/Kconfig
drivers/vhost/tcm_vhost.c
drivers/video/omap2/Kconfig
drivers/w1/masters/Kconfig
drivers/xen/swiotlb-xen.c
fs/Kconfig
fs/cifs/cifsacl.c
fs/ext3/inode.c
fs/ext3/super.c
fs/ext4/Kconfig
fs/ext4/Makefile
fs/ext4/acl.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/extents_status.c [new file with mode: 0644]
fs/ext4/extents_status.h [new file with mode: 0644]
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/inline.c [new file with mode: 0644]
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/migrate.c
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/symlink.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/nfs/idmap.c
fs/quota/quota.c
fs/udf/inode.c
include/asm-generic/pgtable.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_hashtab.h
include/drm/exynos_drm.h
include/drm/intel-gtt.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_execbuf_util.h
include/drm/ttm/ttm_memory.h
include/drm/ttm/ttm_object.h
include/linux/backing-dev.h
include/linux/blkdev.h
include/linux/bsg-lib.h
include/linux/cred.h
include/linux/dma-attrs.h
include/linux/efi.h
include/linux/extcon.h
include/linux/huge_mm.h
include/linux/hugetlb.h
include/linux/i2c/twl.h
include/linux/input/ti_am335x_tsc.h [new file with mode: 0644]
include/linux/input/ti_tscadc.h [deleted file]
include/linux/jbd2.h
include/linux/key.h
include/linux/kref.h
include/linux/mempolicy.h
include/linux/mfd/arizona/registers.h
include/linux/mfd/as3711.h [new file with mode: 0644]
include/linux/mfd/da9052/da9052.h
include/linux/mfd/da9055/core.h
include/linux/mfd/da9055/pdata.h
include/linux/mfd/da9055/reg.h
include/linux/mfd/rc5t583.h
include/linux/mfd/retu.h [new file with mode: 0644]
include/linux/mfd/sta2x11-mfd.h
include/linux/mfd/stmpe.h
include/linux/mfd/ti_am335x_tscadc.h [new file with mode: 0644]
include/linux/mfd/tps65090.h
include/linux/mfd/tps6586x.h
include/linux/mfd/tps65910.h
include/linux/mfd/tps80031.h [new file with mode: 0644]
include/linux/mfd/twl6040.h
include/linux/mfd/viperboard.h [new file with mode: 0644]
include/linux/migrate.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/platform_data/ti_am335x_adc.h [new file with mode: 0644]
include/linux/rmap.h
include/linux/sched.h
include/linux/swiotlb.h
include/linux/vm_event_item.h
include/linux/vmstat.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/ext4.h
include/trace/events/migrate.h [new file with mode: 0644]
include/uapi/drm/drm.h
include/uapi/drm/exynos_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/mempolicy.h
init/Kconfig
init/main.c
kernel/cred.c
kernel/fork.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/features.h
kernel/sched/sched.h
kernel/seccomp.c
kernel/sysctl.c
lib/swiotlb.c
mm/backing-dev.c
mm/compaction.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mmap.c
mm/mprotect.c
mm/mremap.c
mm/page_alloc.c
mm/pgtable-generic.c
mm/rmap.c
mm/vmstat.c
net/dns_resolver/dns_key.c
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/process_keys.c
security/keys/request_key.c
security/smack/Kconfig
security/smack/smackfs.c
security/yama/yama_lsm.c

diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f503090..e59480d 100644
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default the DMA-mapping subsystem is allowed to assemble the buffer
+allocated by the dma_alloc_attrs() function from individual pages if it
+can be mapped as a contiguous chunk into the device's DMA address space.
+By specifying this attribute the allocated buffer is forced to be
+contiguous also in physical memory.
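(An editorial sketch, not part of the patch: how a driver might request a
physically contiguous buffer with this attribute; the function name and the
dev/size/handle parameters are made up for the example.)

    #include <linux/dma-mapping.h>

    /* Allocate a buffer that is contiguous in physical memory, too. */
    static void *alloc_contig(struct device *dev, size_t size,
                              dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }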
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b030052..4ee2304 100644
@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
             the <methodname>page_flip</methodname> operation will be called with a
             non-NULL <parameter>event</parameter> argument pointing to a
             <structname>drm_pending_vblank_event</structname> instance. Upon page
-            flip completion the driver must fill the
-            <parameter>event</parameter>::<structfield>event</structfield>
-            <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
-            and <structfield>tv_usec</structfield> fields with the associated
-            vertical blanking count and timestamp, add the event to the
-            <parameter>drm_file</parameter> list of events to be signaled, and wake
-            up any waiting process. This can be performed with
+            flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+            to fill in the event and send it, waking up any waiting processes.
+            This can be performed with
             <programlisting><![CDATA[
-            struct timeval now;
-
-            event->event.sequence = drm_vblank_count_and_time(..., &now);
-            event->event.tv_sec = now.tv_sec;
-            event->event.tv_usec = now.tv_usec;
-
             spin_lock_irqsave(&dev->event_lock, flags);
-            list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-            wake_up_interruptible(&event->base.file_priv->event_wait);
+            ...
+            drm_send_vblank_event(dev, pipe, event);
             spin_unlock_irqrestore(&dev->event_lock, flags);
             ]]></programlisting>
           </para>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
   </sect1>
 
-  <!-- Internals: mid-layer helper functions -->
+  <!-- Internals: kms helper functions -->
 
   <sect1>
-    <title>Mid-layer Helper Functions</title>
+    <title>Mode Setting Helper Functions</title>
     <para>
       The CRTC, encoder and connector functions provided by the drivers
       implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
         </listitem>
       </itemizedlist>
     </sect2>
+    <sect2>
+      <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+    </sect2>
+    <sect2>
+      <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+    </sect2>
+    <sect2>
+      <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+    </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 0000000..b4fa934
--- /dev/null
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+  in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+  range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-mpe"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-vi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-epp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-isp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr2d"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr3d"
+  - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dc"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+  Each display controller node has a child node, named "rgb", that represents
+  the RGB output associated with the controller. It can take the following
+  optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-hdmi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+  - vdd-supply: regulator for supply voltage
+  - pll-supply: regulator for PLL
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-tvo"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dsi"
+  - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+       ...
+
+       host1x {
+               compatible = "nvidia,tegra20-host1x", "simple-bus";
+               reg = <0x50000000 0x00024000>;
+               interrupts = <0 65 0x04   /* mpcore syncpt */
+                             0 67 0x04>; /* mpcore general */
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+
+               ranges = <0x54000000 0x54000000 0x04000000>;
+
+               mpe {
+                       compatible = "nvidia,tegra20-mpe";
+                       reg = <0x54040000 0x00040000>;
+                       interrupts = <0 68 0x04>;
+               };
+
+               vi {
+                       compatible = "nvidia,tegra20-vi";
+                       reg = <0x54080000 0x00040000>;
+                       interrupts = <0 69 0x04>;
+               };
+
+               epp {
+                       compatible = "nvidia,tegra20-epp";
+                       reg = <0x540c0000 0x00040000>;
+                       interrupts = <0 70 0x04>;
+               };
+
+               isp {
+                       compatible = "nvidia,tegra20-isp";
+                       reg = <0x54100000 0x00040000>;
+                       interrupts = <0 71 0x04>;
+               };
+
+               gr2d {
+                       compatible = "nvidia,tegra20-gr2d";
+                       reg = <0x54140000 0x00040000>;
+                       interrupts = <0 72 0x04>;
+               };
+
+               gr3d {
+                       compatible = "nvidia,tegra20-gr3d";
+                       reg = <0x54180000 0x00040000>;
+               };
+
+               dc@54200000 {
+                       compatible = "nvidia,tegra20-dc";
+                       reg = <0x54200000 0x00040000>;
+                       interrupts = <0 73 0x04>;
+
+                       rgb {
+                               status = "disabled";
+                       };
+               };
+
+               dc@54240000 {
+                       compatible = "nvidia,tegra20-dc";
+                       reg = <0x54240000 0x00040000>;
+                       interrupts = <0 74 0x04>;
+
+                       rgb {
+                               status = "disabled";
+                       };
+               };
+
+               hdmi {
+                       compatible = "nvidia,tegra20-hdmi";
+                       reg = <0x54280000 0x00040000>;
+                       interrupts = <0 75 0x04>;
+                       status = "disabled";
+               };
+
+               tvo {
+                       compatible = "nvidia,tegra20-tvo";
+                       reg = <0x542c0000 0x00040000>;
+                       interrupts = <0 76 0x04>;
+                       status = "disabled";
+               };
+
+               dsi {
+                       compatible = "nvidia,tegra20-dsi";
+                       reg = <0x54300000 0x00040000>;
+                       status = "disabled";
+               };
+       };
+
+       ...
+};
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
new file mode 100644
index 0000000..56edb55
--- /dev/null
@@ -0,0 +1,28 @@
+* ST Microelectronics STMPE Multi-Functional Device
+
+STMPE is an MFD device which may expose the following inbuilt devices: gpio,
+keypad, touchscreen, adc, pwm, rotator.
+
+Required properties:
+ - compatible                   : "st,stmpe[610|801|811|1601|2401|2403]"
+ - reg                          : I2C/SPI address of the device
+
+Optional properties:
+ - interrupts                   : The interrupt outputs from the controller
+ - interrupt-controller         : Marks the device node as an interrupt controller
+ - interrupt-parent             : Specifies which IRQ controller we're connected to
+ - wakeup-source                : Marks the device as a wakeup source
+ - st,autosleep-timeout         : Valid entries (ms): 4, 16, 32, 64, 128, 256, 512 and 1024
+
+Example:
+
+       stmpe1601: stmpe1601@40 {
+               compatible = "st,stmpe1601";
+               reg = <0x40>;
+               interrupts = <26 0x4>;
+               interrupt-parent = <&gpio6>;
+               interrupt-controller;
+
+               wakeup-source;
+               st,autosleep-timeout = <1024>;
+       };
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d316fb8..4f05d20 100644
@@ -11,6 +11,9 @@ Required properties:
   using the standard binding for regulators found at
   Documentation/devicetree/bindings/regulator/regulator.txt.
 
+Optional properties:
+- ti,pmic-shutdown-controller: Tells the PMIC to shut down on PWR_EN toggle.
+
   The valid names for regulators are:
   tps65217: dcdc1, dcdc2, dcdc3, ldo1, ldo2, ldo3 and ldo4
 
@@ -20,6 +23,7 @@ Example:
 
        tps: tps@24 {
                compatible = "ti,tps65217";
+               ti,pmic-shutdown-controller;
 
                regulators {
                        dcdc1_reg: dcdc1 {
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 104322b..34ea4f1 100644
@@ -200,12 +200,9 @@ inode_readahead_blks=n     This tuning parameter controls the maximum
                        table readahead algorithm will pre-read into
                        the buffer cache.  The default value is 32 blocks.
 
-nouser_xattr           Disables Extended User Attributes. If you have extended
-                       attribute support enabled in the kernel configuration
-                       (CONFIG_EXT4_FS_XATTR), extended attribute support
-                       is enabled by default on mount. See the attr(5) manual
-                       page and http://acl.bestbits.at/ for more information
-                       about extended attributes.
+nouser_xattr           Disables Extended User Attributes.  See the
+                       attr(5) manual page and http://acl.bestbits.at/
+                       for more information about extended attributes.
 
 noacl                  This option disables POSIX Access Control List
                        support. If ACL support is enabled in the kernel
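(An editorial aside, not part of the patch: both of the options above are
ordinary ext4 mount options, e.g.

    mount -t ext4 -o noacl,nouser_xattr /dev/sdb1 /mnt

where the device and mount point are made up for the example.)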
diff --git a/Documentation/i2c/smbus-protocol b/Documentation/i2c/smbus-protocol
index 49f5b68..d1f2261 100644
@@ -23,6 +23,12 @@ don't match these function names.  For some of the operations which pass a
 single data byte, the functions using SMBus protocol operation names execute
 a different protocol operation entirely.
 
+Each transaction type corresponds to a functionality flag. Before calling a
+transaction function, a device driver should always check (just once) for
+the corresponding functionality flag to ensure that the underlying I2C
+adapter supports the transaction in question. See
+<file:Documentation/i2c/functionality> for the details.
+
 
 Key to symbols
 ==============
@@ -49,6 +55,8 @@ This sends a single bit to the device, at the place of the Rd/Wr bit.
 
 A Addr Rd/Wr [A] P
 
+Functionality flag: I2C_FUNC_SMBUS_QUICK
+
 
 SMBus Receive Byte:  i2c_smbus_read_byte()
 ==========================================
@@ -60,6 +68,8 @@ the previous SMBus command.
 
 S Addr Rd [A] [Data] NA P
 
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE
+
 
 SMBus Send Byte:  i2c_smbus_write_byte()
 ========================================
@@ -69,6 +79,8 @@ to a device.  See Receive Byte for more information.
 
 S Addr Wr [A] Data [A] P
 
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE
+
 
 SMBus Read Byte:  i2c_smbus_read_byte_data()
 ============================================
@@ -78,6 +90,8 @@ The register is specified through the Comm byte.
 
 S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
 
+Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
+
 
 SMBus Read Word:  i2c_smbus_read_word_data()
 ============================================
@@ -88,6 +102,8 @@ byte. But this time, the data is a complete word (16 bits).
 
 S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
 
+Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
+
 Note the convenience function i2c_smbus_read_word_swapped is
 available for reads where the two data bytes are the other way
 around (not SMBus compliant, but very popular.)
@@ -102,6 +118,8 @@ the Read Byte operation.
 
 S Addr Wr [A] Comm [A] Data [A] P
 
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BYTE_DATA
+
 
 SMBus Write Word:  i2c_smbus_write_word_data()
 ==============================================
@@ -112,6 +130,8 @@ specified through the Comm byte.
 
 S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] P
 
+Functionality flag: I2C_FUNC_SMBUS_WRITE_WORD_DATA
+
 Note the convenience function i2c_smbus_write_word_swapped is
 available for writes where the two data bytes are the other way
 around (not SMBus compliant, but very popular.)
@@ -126,6 +146,8 @@ This command selects a device register (through the Comm byte), sends
 S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A] 
                              S Addr Rd [A] [DataLow] A [DataHigh] NA P
 
+Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
+
 
 SMBus Block Read:  i2c_smbus_read_block_data()
 ==============================================
@@ -137,6 +159,8 @@ of data is specified by the device in the Count byte.
 S Addr Wr [A] Comm [A] 
            S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
 
+Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
+
 
 SMBus Block Write:  i2c_smbus_write_block_data()
 ================================================
@@ -147,6 +171,8 @@ Comm byte. The amount of data is specified in the Count byte.
 
 S Addr Wr [A] Comm [A] Count [A] Data [A] Data [A] ... [A] Data [A] P
 
+Functionality flag: I2C_FUNC_SMBUS_WRITE_BLOCK_DATA
+
 
 SMBus Block Write - Block Read Process Call
 ===========================================
@@ -160,6 +186,8 @@ This command selects a device register (through the Comm byte), sends
 S Addr Wr [A] Comm [A] Count [A] Data [A] ...
                              S Addr Rd [A] [Count] A [Data] ... A P
 
+Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
+
 
 SMBus Host Notify
 =================
@@ -229,15 +257,7 @@ designated register that is specified through the Comm byte.
 S Addr Wr [A] Comm [A] 
            S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
 
-
-I2C Block Read (2 Comm bytes)
-=============================
-
-This command reads a block of bytes from a device, from a 
-designated register that is specified through the two Comm bytes.
-
-S Addr Wr [A] Comm1 [A] Comm2 [A] 
-           S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
 
 
 I2C Block Write:  i2c_smbus_write_i2c_block_data()
@@ -249,3 +269,5 @@ Comm byte. Note that command lengths of 0, 2, or more bytes are
 supported as they are indistinguishable from data.
 
 S Addr Wr [A] Comm [A] Data [A] Data [A] ... [A] Data [A] P
+
+Functionality flag: I2C_FUNC_SMBUS_WRITE_I2C_BLOCK
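(An editorial sketch, not part of the patch: the functionality-flag check
described in the paragraph added near the top of this file, done once at
probe time. "client" is a hypothetical struct i2c_client pointer and 0x10 a
made-up register.)

    #include <linux/i2c.h>

    s32 value;

    /* Verify the adapter supports the transaction before using it. */
    if (!i2c_check_functionality(client->adapter,
                                 I2C_FUNC_SMBUS_READ_BYTE_DATA))
            return -EOPNOTSUPP;
    value = i2c_smbus_read_byte_data(client, 0x10);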
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 20e248c..ea8e5b4 100644
@@ -2032,6 +2032,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        nr_uarts=       [SERIAL] maximum number of UARTs to be registered.
 
+       numa_balancing= [KNL,X86] Enable or disable automatic NUMA balancing.
+                       Allowed values are enable and disable.
+
        numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
                        one of ['zone', 'node', 'default'] can be specified
                        This can be set from sysctl after boot.
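(An editorial aside, not part of the patch: like any other boot parameter,
the new option is appended to the kernel command line, e.g.
"... root=/dev/sda1 ro numa_balancing=disable", where everything but the
parameter itself is made up for the example.)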
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 48ba715..ddf85a5 100644
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
 and:
   http://www.kroah.com/linux/talks/ols_2004_kref_talk/
 
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+       struct my_data *entry = NULL;
+       mutex_lock(&mutex);
+       if (!list_empty(&q)) {
+               entry = container_of(q.next, struct my_data, link);
+               if (!kref_get_unless_zero(&entry->refcount))
+                       entry = NULL;
+       }
+       mutex_unlock(&mutex);
+       return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+       struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+       mutex_lock(&mutex);
+       list_del(&entry->link);
+       mutex_unlock(&mutex);
+       kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+       kref_put(&entry->refcount, release_entry);
+}
+
+This is useful for removing the mutex lock around kref_put() in put_entry(),
+but it's important that kref_get_unless_zero is enclosed in the same critical
+section that finds the entry in the lookup table; otherwise
+kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use RCU
+locking for lookups in the above example:
+
+struct my_data
+{
+       struct rcu_head rhead;
+       .
+       struct kref refcount;
+       .
+       .
+};
+
+static struct my_data *get_entry_rcu()
+{
+       struct my_data *entry = NULL;
+       rcu_read_lock();
+       if (!list_empty(&q)) {
+               entry = container_of(q.next, struct my_data, link);
+               if (!kref_get_unless_zero(&entry->refcount))
+                       entry = NULL;
+       }
+       rcu_read_unlock();
+       return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+       struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+       mutex_lock(&mutex);
+       list_del_rcu(&entry->link);
+       mutex_unlock(&mutex);
+       kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+       kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for an
+RCU grace period after release_entry_rcu is called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
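(An editorial sketch, not part of the patch: a caller of the lookup helpers
defined in the examples above; "process_entry" is made up.)

    struct my_data *entry = get_entry();

    if (entry) {
            process_entry(entry);
            put_entry(entry);       /* may free via the release callback */
    }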
diff --git a/Documentation/prctl/seccomp_filter.txt b/Documentation/prctl/seccomp_filter.txt
index 597c3c5..1e469ef 100644
@@ -95,12 +95,15 @@ SECCOMP_RET_KILL:
 
 SECCOMP_RET_TRAP:
        Results in the kernel sending a SIGSYS signal to the triggering
-       task without executing the system call.  The kernel will
-       rollback the register state to just before the system call
-       entry such that a signal handler in the task will be able to
-       inspect the ucontext_t->uc_mcontext registers and emulate
-       system call success or failure upon return from the signal
-       handler.
+       task without executing the system call.  siginfo->si_call_addr
+       will show the address of the system call instruction, and
+       siginfo->si_syscall and siginfo->si_arch will indicate which
+       syscall was attempted.  The program counter will be as though
+       the syscall happened (i.e. it will not point to the syscall
+       instruction).  The return value register will contain an arch-
+       dependent value -- if resuming execution, set it to something
+       sensible.  (The architecture dependency is because replacing
+       it with -ENOSYS could overwrite some useful information.)
 
        The SECCOMP_RET_DATA portion of the return value will be passed
        as si_errno.
@@ -123,6 +126,18 @@ SECCOMP_RET_TRACE:
        the BPF program return value will be available to the tracer
        via PTRACE_GETEVENTMSG.
 
+       The tracer can skip the system call by changing the syscall number
+       to -1.  Alternatively, the tracer can change the system call
+       requested by changing the system call to a valid syscall number.  If
+       the tracer asks to skip the system call, then the system call will
+       appear to return the value that the tracer puts in the return value
+       register.
+
+       The seccomp check will not be run again after the tracer is
+       notified.  (This means that seccomp-based sandboxes MUST NOT
+       allow use of ptrace, even of other sandboxed processes, without
+       extreme care; ptracers can use this mechanism to escape.)
+
 SECCOMP_RET_ALLOW:
        Results in the system call being executed.
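(An editorial sketch, not part of the patch: an x86-64 tracer skipping a
stopped child's system call as described under SECCOMP_RET_TRACE above,
making the call appear to return -EPERM. "child" is the traced pid, assumed
to be stopped at the seccomp event.)

    #include <sys/ptrace.h>
    #include <sys/user.h>
    #include <errno.h>

    struct user_regs_struct regs;

    ptrace(PTRACE_GETREGS, child, NULL, &regs);
    regs.orig_rax = -1;             /* skip the system call */
    regs.rax = -EPERM;              /* value the child sees returned */
    ptrace(PTRACE_SETREGS, child, NULL, &regs);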
 
@@ -161,3 +176,50 @@ architecture supports both ptrace_event and seccomp, it will be able to
 support seccomp filter with minor fixup: SIGSYS support and seccomp return
 value checking.  Then it must just add CONFIG_HAVE_ARCH_SECCOMP_FILTER
 to its arch-specific Kconfig.
+
+
+
+Caveats
+-------
+
+The vDSO can cause some system calls to run entirely in userspace,
+leading to surprises when you run programs on different machines that
+fall back to real syscalls.  To minimize these surprises on x86, make
+sure you test with
+/sys/devices/system/clocksource/clocksource0/current_clocksource set to
+something like acpi_pm.
+
+On x86-64, vsyscall emulation is enabled by default.  (vsyscalls are
+legacy variants on vDSO calls.)  Currently, emulated vsyscalls will honor
+seccomp, with a few oddities:
+
+- A return value of SECCOMP_RET_TRAP will set a si_call_addr pointing to
+  the vsyscall entry for the given call and not the address after the
+  'syscall' instruction.  Any code which wants to restart the call
+  should be aware that (a) a ret instruction has been emulated and (b)
+  trying to resume the syscall will again trigger the standard vsyscall
+  emulation security checks, making resuming the syscall mostly
+  pointless.
+
+- A return value of SECCOMP_RET_TRACE will signal the tracer as usual,
+  but the syscall may not be changed to another system call using the
+  orig_rax register. It may only be changed to -1 in order to skip the
+  currently emulated call. Any other change MAY terminate the process.
+  The rip value seen by the tracer will be the syscall entry address;
+  this is different from normal behavior.  The tracer MUST NOT modify
+  rip or rsp.  (Do not rely on other changes terminating the process.
+  They might work.  For example, on some kernels, choosing a syscall
+  that only exists in future kernels will be correctly emulated (by
+  returning -ENOSYS).)
+
+To detect this quirky behavior, check for addr & ~0x0C00 ==
+0xFFFFFFFFFF600000.  (For SECCOMP_RET_TRACE, use rip.  For
+SECCOMP_RET_TRAP, use siginfo->si_call_addr.)  Do not check any other
+condition: future kernels may improve vsyscall emulation and current
+kernels in vsyscall=native mode will behave differently, but the
+instructions at 0xF...F600{0,4,8,C}00 will not be system calls in these
+cases.
+
+Note that modern systems are unlikely to use vsyscalls at all -- they
+are a legacy feature and they are considerably slower than standard
+syscalls.  New code will use the vDSO, and vDSO-issued system calls
+are indistinguishable from normal system calls.
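(An editorial sketch, not part of the patch: the detection test the text
describes, applied to rip for SECCOMP_RET_TRACE or to
siginfo->si_call_addr for SECCOMP_RET_TRAP.)

    static int is_emulated_vsyscall(unsigned long addr)
    {
            return (addr & ~0xC00UL) == 0xFFFFFFFFFF600000UL;
    }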
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt
index 7d9ca92..7b4145d 100644
@@ -994,6 +994,23 @@ payload contents" for more information.
     reference pointer if successful.
 
 
+(*) A keyring can be created by:
+
+       struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+                                 const struct cred *cred,
+                                 key_perm_t perm,
+                                 unsigned long flags,
+                                 struct key *dest);
+
+    This creates a keyring with the given attributes and returns it.  If dest
+    is not NULL, the new keyring will be linked into the keyring to which it
+    points.  No permission checks are made upon the destination keyring.
+
+    Error EDQUOT can be returned if the keyring would overload the quota (pass
+    KEY_ALLOC_NOT_IN_QUOTA in flags if the keyring shouldn't be accounted
+    towards the user's quota).  Error ENOMEM can also be returned.
+
+
 (*) To check the validity of a key, this function can be called:
 
        int validate_key(struct key *key);
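(An editorial sketch, not part of the patch, for the keyring_alloc()
interface documented above: allocating an unlinked, unquoted keyring owned
by the current task. The description string is made up.)

    struct key *keyring;

    keyring = keyring_alloc("example.ring", current_uid(),
                            current_gid(), current_cred(),
                            KEY_POS_ALL | KEY_USR_ALL,
                            KEY_ALLOC_NOT_IN_QUOTA, NULL);
    if (IS_ERR(keyring))
            return PTR_ERR(keyring);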
diff --git a/MAINTAINERS b/MAINTAINERS
index f71d2f9..6892b26 100644
@@ -2549,6 +2549,15 @@ S:       Supported
 F:     drivers/gpu/drm/exynos
 F:     include/drm/exynos*
 
+DRM DRIVERS FOR NVIDIA TEGRA
+M:     Thierry Reding <thierry.reding@avionic-design.de>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-tegra@vger.kernel.org
+T:     git git://gitorious.org/thierryreding/linux.git
+S:     Maintained
+F:     drivers/gpu/drm/tegra/
+F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
 DSCC4 DRIVER
 M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
@@ -3712,7 +3721,7 @@ I2C/SMBUS STUB DRIVER
 M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
-F:     drivers/i2c/busses/i2c-stub.c
+F:     drivers/i2c/i2c-stub.c
 
 I2C SUBSYSTEM
 M:     Wolfram Sang <w.sang@pengutronix.de>
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b401585..d077ef8 100644
@@ -132,8 +132,8 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
        spear320-evb.dtb \
        spear320-hmi.dtb
 dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
-dtb-$(CONFIG_ARCH_SUNXI) += sun4i-cubieboard.dtb \
-       sun5i-olinuxino.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \
+       sun5i-a13-olinuxino.dtb
 dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
        tegra20-medcom-wide.dtb \
        tegra20-paz00.dtb \
diff --git a/arch/arm/boot/dts/sun4i-cubieboard.dts b/arch/arm/boot/dts/sun4i-cubieboard.dts
index f4ca126..5cab825 100644
  */
 
 /dts-v1/;
-/include/ "sun4i.dtsi"
+/include/ "sun4i-a10.dtsi"
 
 / {
        model = "Cubietech Cubieboard";
-       compatible = "cubietech,cubieboard", "allwinner,sun4i";
+       compatible = "cubietech,a10-cubieboard", "allwinner,sun4i-a10";
 
        aliases {
                serial0 = &uart0;
index d6ff889..498a091 100644 (file)
  */
 
 /dts-v1/;
-/include/ "sun5i.dtsi"
+/include/ "sun5i-a13.dtsi"
 
 / {
        model = "Olimex A13-Olinuxino";
-       compatible = "olimex,a13-olinuxino", "allwinner,sun5i";
+       compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
        chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
index 7211772..0299915 100644 (file)
@@ -41,6 +41,7 @@
 #include <mach/cp_intc.h>
 #include <mach/da8xx.h>
 #include <mach/mux.h>
+#include <mach/sram.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
index dac146d..04744f9 100644 (file)
@@ -25,7 +25,7 @@ void exynos_init_late(void);
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 int exynos_pm_late_initcall(void);
 #else
-static int exynos_pm_late_initcall(void) { return 0; }
+static inline int exynos_pm_late_initcall(void) { return 0; }
 #endif
 
 #ifdef CONFIG_ARCH_EXYNOS4
index aa56c3e..5789a5e 100644 (file)
 #define OMAP4430_MODULEMODE_HWCTRL_SHIFT               0
 #define OMAP4430_MODULEMODE_SWCTRL_SHIFT               1
 
+/*
+ * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
+ * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
+ * must be set to 196.608 MHz" and hence the DPLL locked frequency is
+ * half of this value.
+ */
+#define OMAP4_DPLL_ABE_DEFFREQ                         98304000
+
 /* Root clocks */
 
 DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -124,6 +132,8 @@ static struct dpll_data dpll_abe_dd = {
        .enable_mask    = OMAP4430_DPLL_EN_MASK,
        .autoidle_mask  = OMAP4430_AUTO_DPLL_MODE_MASK,
        .idlest_mask    = OMAP4430_ST_DPLL_CLK_MASK,
+       .m4xen_mask     = OMAP4430_DPLL_REGM4XEN_MASK,
+       .lpmode_mask    = OMAP4430_DPLL_LPMODE_EN_MASK,
        .max_multiplier = 2047,
        .max_divider    = 128,
        .min_divider    = 1,
@@ -233,7 +243,7 @@ static struct dpll_data dpll_core_dd = {
 
 
 static const char *dpll_core_ck_parents[] = {
-       "sys_clkin_ck",
+       "sys_clkin_ck", "core_hsd_byp_clk_mux_ck"
 };
 
 static struct clk dpll_core_ck;
@@ -286,9 +296,9 @@ DEFINE_CLK_DIVIDER(div_core_ck, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck, 0x0,
                   OMAP4430_CM_CLKSEL_CORE, OMAP4430_CLKSEL_CORE_SHIFT,
                   OMAP4430_CLKSEL_CORE_WIDTH, 0x0, NULL);
 
-DEFINE_CLK_OMAP_HSDIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck",
-                         &dpll_core_m5x2_ck, 0x0, OMAP4430_CM_BYPCLK_DPLL_IVA,
-                         OMAP4430_CLKSEL_0_1_MASK);
+DEFINE_CLK_DIVIDER(div_iva_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
+                  0x0, OMAP4430_CM_BYPCLK_DPLL_IVA, OMAP4430_CLKSEL_0_1_SHIFT,
+                  OMAP4430_CLKSEL_0_1_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
 
 DEFINE_CLK_DIVIDER(div_mpu_hs_clk, "dpll_core_m5x2_ck", &dpll_core_m5x2_ck,
                   0x0, OMAP4430_CM_BYPCLK_DPLL_MPU, OMAP4430_CLKSEL_0_1_SHIFT,
@@ -363,8 +373,21 @@ static struct dpll_data dpll_iva_dd = {
        .min_divider    = 1,
 };
 
+static const char *dpll_iva_ck_parents[] = {
+       "sys_clkin_ck", "iva_hsd_byp_clk_mux_ck"
+};
+
 static struct clk dpll_iva_ck;
 
+static const struct clk_ops dpll_ck_ops = {
+       .enable         = &omap3_noncore_dpll_enable,
+       .disable        = &omap3_noncore_dpll_disable,
+       .recalc_rate    = &omap3_dpll_recalc,
+       .round_rate     = &omap2_dpll_round_rate,
+       .set_rate       = &omap3_noncore_dpll_set_rate,
+       .get_parent     = &omap2_init_dpll_parent,
+};
+
 static struct clk_hw_omap dpll_iva_ck_hw = {
        .hw = {
                .clk = &dpll_iva_ck,
@@ -373,7 +396,7 @@ static struct clk_hw_omap dpll_iva_ck_hw = {
        .ops            = &clkhwops_omap3_dpll,
 };
 
-DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_iva_ck, dpll_iva_ck_parents, dpll_ck_ops);
 
 static const char *dpll_iva_x2_ck_parents[] = {
        "dpll_iva_ck",
@@ -416,6 +439,10 @@ static struct dpll_data dpll_mpu_dd = {
        .min_divider    = 1,
 };
 
+static const char *dpll_mpu_ck_parents[] = {
+       "sys_clkin_ck", "div_mpu_hs_clk"
+};
+
 static struct clk dpll_mpu_ck;
 
 static struct clk_hw_omap dpll_mpu_ck_hw = {
@@ -426,7 +453,7 @@ static struct clk_hw_omap dpll_mpu_ck_hw = {
        .ops            = &clkhwops_omap3_dpll,
 };
 
-DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_mpu_ck, dpll_mpu_ck_parents, dpll_ck_ops);
 
 DEFINE_CLK_FIXED_FACTOR(mpu_periphclk, "dpll_mpu_ck", &dpll_mpu_ck, 0x0, 1, 2);
 
@@ -464,6 +491,9 @@ static struct dpll_data dpll_per_dd = {
        .min_divider    = 1,
 };
 
+static const char *dpll_per_ck_parents[] = {
+       "sys_clkin_ck", "per_hsd_byp_clk_mux_ck"
+};
 
 static struct clk dpll_per_ck;
 
@@ -475,7 +505,7 @@ static struct clk_hw_omap dpll_per_ck_hw = {
        .ops            = &clkhwops_omap3_dpll,
 };
 
-DEFINE_STRUCT_CLK(dpll_per_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_per_ck, dpll_per_ck_parents, dpll_ck_ops);
 
 DEFINE_CLK_DIVIDER(dpll_per_m2_ck, "dpll_per_ck", &dpll_per_ck, 0x0,
                   OMAP4430_CM_DIV_M2_DPLL_PER, OMAP4430_DPLL_CLKOUT_DIV_SHIFT,
@@ -559,6 +589,10 @@ static struct dpll_data dpll_usb_dd = {
        .min_divider    = 1,
 };
 
+static const char *dpll_usb_ck_parents[] = {
+       "sys_clkin_ck", "usb_hs_clk_div_ck"
+};
+
 static struct clk dpll_usb_ck;
 
 static struct clk_hw_omap dpll_usb_ck_hw = {
@@ -569,7 +603,7 @@ static struct clk_hw_omap dpll_usb_ck_hw = {
        .ops            = &clkhwops_omap3_dpll,
 };
 
-DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_core_ck_parents, dpll_abe_ck_ops);
+DEFINE_STRUCT_CLK(dpll_usb_ck, dpll_usb_ck_parents, dpll_ck_ops);
 
 static const char *dpll_usb_clkdcoldo_ck_parents[] = {
        "dpll_usb_ck",
@@ -696,9 +730,13 @@ DEFINE_CLK_DIVIDER(syc_clk_div_ck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
                   OMAP4430_CM_ABE_DSS_SYS_CLKSEL, OMAP4430_CLKSEL_0_0_SHIFT,
                   OMAP4430_CLKSEL_0_0_WIDTH, 0x0, NULL);
 
+static const char *dbgclk_mux_ck_parents[] = {
+       "sys_clkin_ck"
+};
+
 static struct clk dbgclk_mux_ck;
 DEFINE_STRUCT_CLK_HW_OMAP(dbgclk_mux_ck, NULL);
-DEFINE_STRUCT_CLK(dbgclk_mux_ck, dpll_core_ck_parents,
+DEFINE_STRUCT_CLK(dbgclk_mux_ck, dbgclk_mux_ck_parents,
                  dpll_usb_clkdcoldo_ck_ops);
 
 /* Leaf clocks controlled by modules */
@@ -1935,10 +1973,10 @@ static struct omap_clk omap44xx_clks[] = {
        CLK("4803e000.timer",   "timer_sys_ck", &sys_clkin_ck,  CK_443X),
        CLK("48086000.timer",   "timer_sys_ck", &sys_clkin_ck,  CK_443X),
        CLK("48088000.timer",   "timer_sys_ck", &sys_clkin_ck,  CK_443X),
-       CLK("49038000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
-       CLK("4903a000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
-       CLK("4903c000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
-       CLK("4903e000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
+       CLK("40138000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
+       CLK("4013a000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
+       CLK("4013c000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
+       CLK("4013e000.timer",   "timer_sys_ck", &syc_clk_div_ck,        CK_443X),
        CLK(NULL,       "cpufreq_ck",   &dpll_mpu_ck,   CK_443X),
 };
 
@@ -1955,6 +1993,7 @@ int __init omap4xxx_clk_init(void)
 {
        u32 cpu_clkflg;
        struct omap_clk *c;
+       int rc;
 
        if (cpu_is_omap443x()) {
                cpu_mask = RATE_IN_4430;
@@ -1983,5 +2022,18 @@ int __init omap4xxx_clk_init(void)
        omap2_clk_enable_init_clocks(enable_init_clks,
                                     ARRAY_SIZE(enable_init_clks));
 
+       /*
+        * On OMAP4460 the ABE DPLL fails to turn on if it is in the idle
+        * low-power state when the ABE clock domain is enabled. Work around
+        * this by locking the ABE DPLL on boot.
+        */
+       if (cpu_is_omap446x()) {
+               rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck);
+               if (!rc)
+                       rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ);
+               if (rc)
+                       pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+       }
+
        return 0;
 }
index 9917f79..b402048 100644 (file)
@@ -195,6 +195,10 @@ struct clksel {
  * @enable_mask: mask of the DPLL mode bitfield in @control_reg
  * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
  * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ *                     omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ *                      omap4_dpll_lpmode_recalc()
  * @max_multiplier: maximum valid non-bypass multiplier value (actual)
  * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
  * @min_divider: minimum valid non-bypass divider value (actual)
@@ -205,6 +209,8 @@ struct clksel {
  * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
  * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
  * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
  * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
  * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
  * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
@@ -233,6 +239,8 @@ struct dpll_data {
        u32                     enable_mask;
        unsigned long           last_rounded_rate;
        u16                     last_rounded_m;
+       u8                      last_rounded_m4xen;
+       u8                      last_rounded_lpmode;
        u16                     max_multiplier;
        u8                      last_rounded_n;
        u8                      min_divider;
@@ -245,6 +253,8 @@ struct dpll_data {
        u32                     idlest_mask;
        u32                     dco_mask;
        u32                     sddiv_mask;
+       u32                     lpmode_mask;
+       u32                     m4xen_mask;
        u8                      auto_recal_bit;
        u8                      recal_en_bit;
        u8                      recal_st_bit;
index 3848735..7faf82d 100644 (file)
@@ -998,7 +998,8 @@ int clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
        spin_lock_irqsave(&clkdm->lock, flags);
 
        /* corner case: disabling unused clocks */
-       if (__clk_get_enable_count(clk) == 0)
+       if ((__clk_get_enable_count(clk) == 0) &&
+           (atomic_read(&clkdm->usecount) == 0))
                goto ccd_exit;
 
        if (atomic_read(&clkdm->usecount) == 0) {
index bca7a88..22590db 100644 (file)
@@ -40,6 +40,8 @@ struct omap3_idle_statedata {
        u32 core_state;
 };
 
+static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+
 static struct omap3_idle_statedata omap3_idle_data[] = {
        {
                .mpu_state = PWRDM_POWER_ON,
@@ -71,7 +73,7 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
        },
 };
 
-static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
+/* Private functions */
 
 static int __omap3_enter_idle(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv,
@@ -260,11 +262,11 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
        return ret;
 }
 
-DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
+static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
 
-struct cpuidle_driver omap3_idle_driver = {
-       .name =         "omap3_idle",
-       .owner =        THIS_MODULE,
+static struct cpuidle_driver omap3_idle_driver = {
+       .name =         "omap3_idle",
+       .owner =        THIS_MODULE,
        .states = {
                {
                        .enter            = omap3_enter_idle_bm,
@@ -327,6 +329,8 @@ struct cpuidle_driver omap3_idle_driver = {
        .safe_state_index = 0,
 };
 
+/* Public functions */
+
 /**
  * omap3_idle_init - Init routine for OMAP3 idle
  *
index 288bee6..d639aef 100644 (file)
@@ -54,6 +54,8 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
 static atomic_t abort_barrier;
 static bool cpu_done[NR_CPUS];
 
+/* Private functions */
+
 /**
  * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
@@ -161,9 +163,19 @@ fail:
        return index;
 }
 
-DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
+/*
+ * For each CPU, set up the broadcast timer, because the local
+ * timers stop in the idle states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+       int cpu = smp_processor_id();
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
+static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
 
-struct cpuidle_driver omap4_idle_driver = {
+static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
        .en_core_tk_irqen               = 1,
@@ -178,7 +190,7 @@ struct cpuidle_driver omap4_idle_driver = {
                        .desc = "MPUSS ON"
                },
                {
-                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
+                       /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
@@ -200,15 +212,7 @@ struct cpuidle_driver omap4_idle_driver = {
        .safe_state_index = 0,
 };
 
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
-       int cpu = smp_processor_id();
-       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
+/* Public functions */
 
 /**
  * omap4_idle_init - Init routine for OMAP4 idle
index fafb28c..2bb1883 100644 (file)
@@ -291,16 +291,13 @@ static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
 
 /*
  * _omap3_noncore_dpll_program - set non-core DPLL M,N values directly
- * @clk: struct clk * of DPLL to set
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- * @freqsel: FREQSEL value to set
+ * @clk:       struct clk * of DPLL to set
+ * @freqsel:   FREQSEL value to set
  *
- * Program the DPLL with the supplied M, N values, and wait for the DPLL to
- * lock..  Returns -EINVAL upon error, or 0 upon success.
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
  */
-static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 m, u8 n,
-                                     u16 freqsel)
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
 {
        struct dpll_data *dd = clk->dpll_data;
        u8 dco, sd_div;
@@ -323,23 +320,45 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 m, u8 n,
        /* Set DPLL multiplier, divider */
        v = __raw_readl(dd->mult_div1_reg);
        v &= ~(dd->mult_mask | dd->div1_mask);
-       v |= m << __ffs(dd->mult_mask);
-       v |= (n - 1) << __ffs(dd->div1_mask);
+       v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+       v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
 
        /* Configure dco and sd_div for dplls that have these fields */
        if (dd->dco_mask) {
-               _lookup_dco(clk, &dco, m, n);
+               _lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
                v &= ~(dd->dco_mask);
                v |= dco << __ffs(dd->dco_mask);
        }
        if (dd->sddiv_mask) {
-               _lookup_sddiv(clk, &sd_div, m, n);
+               _lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+                             dd->last_rounded_n);
                v &= ~(dd->sddiv_mask);
                v |= sd_div << __ffs(dd->sddiv_mask);
        }
 
        __raw_writel(v, dd->mult_div1_reg);
 
+       /* Set 4X multiplier and low-power mode */
+       if (dd->m4xen_mask || dd->lpmode_mask) {
+               v = __raw_readl(dd->control_reg);
+
+               if (dd->m4xen_mask) {
+                       if (dd->last_rounded_m4xen)
+                               v |= dd->m4xen_mask;
+                       else
+                               v &= ~dd->m4xen_mask;
+               }
+
+               if (dd->lpmode_mask) {
+                       if (dd->last_rounded_lpmode)
+                               v |= dd->lpmode_mask;
+                       else
+                               v &= ~dd->lpmode_mask;
+               }
+
+               __raw_writel(v, dd->control_reg);
+       }
+
        /* We let the clock framework set the other output dividers later */
 
        /* REVISIT: Set ramp-up delay? */
@@ -492,8 +511,7 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
                pr_debug("%s: %s: set rate: locking rate to %lu.\n",
                         __func__, __clk_get_name(hw->clk), rate);
 
-               ret = omap3_noncore_dpll_program(clk, dd->last_rounded_m,
-                                               dd->last_rounded_n, freqsel);
+               ret = omap3_noncore_dpll_program(clk, freqsel);
                if (!ret)
                        new_parent = dd->clk_ref;
        }
index d3326c4..d28b0f7 100644 (file)
 #include "clock44xx.h"
 #include "cm-regbits-44xx.h"
 
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can be supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX 1000000
+#define OMAP4_DPLL_LP_FOUT_MAX 100000000
+
 /* Supported only on OMAP4 */
 int omap4_dpllmx_gatectrl_read(struct clk_hw_omap *clk)
 {
@@ -82,6 +91,31 @@ const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
 };
 
 /**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates if low-power mode can be enabled based upon the last
+ * multiplier and divider values calculated. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+       long fint, fout;
+
+       fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+       fout = fint * dd->last_rounded_m;
+
+       if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+               dd->last_rounded_lpmode = 1;
+       else
+               dd->last_rounded_lpmode = 0;
+}
+
+/**
  * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
  * @clk: struct clk * of the DPLL to compute the rate for
  *
@@ -130,7 +164,6 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
                                    unsigned long *parent_rate)
 {
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-       u32 v;
        struct dpll_data *dd;
        long r;
 
@@ -139,18 +172,31 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
 
        dd = clk->dpll_data;
 
-       /* regm4xen adds a multiplier of 4 to DPLL calculations */
-       v = __raw_readl(dd->control_reg) & OMAP4430_DPLL_REGM4XEN_MASK;
-
-       if (v)
-               target_rate = target_rate / OMAP4430_REGM4XEN_MULT;
+       dd->last_rounded_m4xen = 0;
 
+       /*
+        * First try to compute the DPLL configuration for
+        * target rate without using the 4X multiplier.
+        */
        r = omap2_dpll_round_rate(hw, target_rate, NULL);
+       if (r != ~0)
+               goto out;
+
+       /*
+        * If we did not find a valid DPLL configuration, try again, but
+        * this time see if using the 4X multiplier can help. Enabling the
+        * 4X multiplier is equivalent to dividing the target rate by 4.
+        */
+       r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+                                 NULL);
        if (r == ~0)
                return r;
 
-       if (v)
-               clk->dpll_data->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+       dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+       dd->last_rounded_m4xen = 1;
+
+out:
+       omap4_dpll_lpmode_recalc(dd);
 
-       return clk->dpll_data->last_rounded_rate;
+       return dd->last_rounded_rate;
 }
index 0816562..d54cfc5 100644 (file)
@@ -104,7 +104,7 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
 static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
        /* name         parent          rate            enabled */
        { "clk_m",      NULL,           0,              true },
-       { "pll_p",      "clk_m",        408000000,      true },
+       { "pll_p",      "pll_ref",      408000000,      true },
        { "pll_p_out1", "pll_p",        9600000,        true },
        { "pll_p_out4", "pll_p",        102000000,      true },
        { "sclk",       "pll_p_out4",   102000000,      true },
index efc000e..d714777 100644 (file)
@@ -2045,9 +2045,7 @@ struct clk_ops tegra30_periph_clk_ops = {
 static int tegra30_dsib_clk_set_parent(struct clk_hw *hw, u8 index)
 {
        struct clk *d = clk_get_sys(NULL, "pll_d");
-       /* The DSIB parent selection bit is in PLLD base
-          register - can not do direct r-m-w, must be
-          protected by PLLD lock */
+       /* The DSIB parent selection bit is in PLLD base register */
        tegra_clk_cfg_ex(
                d, TEGRA_CLK_PLLD_MIPI_MUX_SEL, index);
 
index 12f3994..0374b98 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/fsmc.h>
 #include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_data/clk-u300.h>
@@ -1543,39 +1542,6 @@ static struct pinctrl_map __initdata u300_pinmux_map[] = {
                                    pin_highz_conf),
 };
 
-struct u300_mux_hog {
-       struct device *dev;
-       struct pinctrl *p;
-};
-
-static struct u300_mux_hog u300_mux_hogs[] = {
-       {
-               .dev = &uart0_device.dev,
-       },
-       {
-               .dev = &mmcsd_device.dev,
-       },
-};
-
-static int __init u300_pinctrl_fetch(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(u300_mux_hogs); i++) {
-               struct pinctrl *p;
-
-               p = pinctrl_get_select_default(u300_mux_hogs[i].dev);
-               if (IS_ERR(p)) {
-                       pr_err("u300: could not get pinmux hog for dev %s\n",
-                              dev_name(u300_mux_hogs[i].dev));
-                       continue;
-               }
-               u300_mux_hogs[i].p = p;
-       }
-       return 0;
-}
-subsys_initcall(u300_pinctrl_fetch);
-
 /*
  * Notice that AMBA devices are initialized before platform devices.
  *
index 4b24c99..a5e05f6 100644 (file)
@@ -8,6 +8,7 @@
 #ifndef __DEVICES_DB8500_H
 #define __DEVICES_DB8500_H
 
+#include <linux/platform_data/usb-musb-ux500.h>
 #include <mach/irqs.h>
 #include "devices-common.h"
 
index 5383bc0..6b2fb87 100644 (file)
@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
        spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+                                         gfp_t gfp, struct dma_attrs *attrs)
 {
        struct page **pages;
        int count = size >> PAGE_SHIFT;
@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
        if (!pages)
                return NULL;
 
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+               unsigned long order = get_order(size);
+               struct page *page;
+
+               page = dma_alloc_from_contiguous(dev, count, order);
+               if (!page)
+                       goto error;
+
+               __dma_clear_buffer(page, size);
+
+               for (i = 0; i < count; i++)
+                       pages[i] = page + i;
+
+               return pages;
+       }
+
        while (count) {
                int j, order = __fls(count);
 
@@ -1081,14 +1099,21 @@ error:
        return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+                              size_t size, struct dma_attrs *attrs)
 {
        int count = size >> PAGE_SHIFT;
        int array_size = count * sizeof(struct page *);
        int i;
-       for (i = 0; i < count; i++)
-               if (pages[i])
-                       __free_pages(pages[i], 0);
+
+       if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+               dma_release_from_contiguous(dev, pages[0], count);
+       } else {
+               for (i = 0; i < count; i++)
+                       if (pages[i])
+                               __free_pages(pages[i], 0);
+       }
+
        if (array_size <= PAGE_SIZE)
                kfree(pages);
        else
@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        if (gfp & GFP_ATOMIC)
                return __iommu_alloc_atomic(dev, size, handle);
 
-       pages = __iommu_alloc_buffer(dev, size, gfp);
+       pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
        if (!pages)
                return NULL;
 
@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
        __iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-       __iommu_free_buffer(dev, pages, size);
+       __iommu_free_buffer(dev, pages, size, attrs);
        return NULL;
 }
 
@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        }
 
        __iommu_remove_mapping(dev, handle, size);
-       __iommu_free_buffer(dev, pages, size);
+       __iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
index 2f2d87b..b1cfff8 100644 (file)
@@ -35,7 +35,8 @@ endchoice
 if M68KCLASSIC
 
 config M68000
-       bool
+       bool "MC68000"
+       depends on !MMU
        select CPU_HAS_NO_BITFIELDS
        select CPU_HAS_NO_MULDIV64
        select CPU_HAS_NO_UNALIGNED
index 7636751..2f02acf 100644 (file)
@@ -92,7 +92,7 @@ endif
 head-y                         := arch/m68k/kernel/head.o
 head-$(CONFIG_SUN3)            := arch/m68k/kernel/sun3-head.o
 head-$(CONFIG_M68360)          := arch/m68k/platform/68360/head.o
-head-$(CONFIG_M68000)          := arch/m68k/platform/68328/head.o
+head-$(CONFIG_M68000)          := arch/m68k/platform/68000/head.o
 head-$(CONFIG_COLDFIRE)                := arch/m68k/platform/coldfire/head.o
 
 core-y                         += arch/m68k/kernel/    arch/m68k/mm/
@@ -114,9 +114,7 @@ core-$(CONFIG_M68040)               += arch/m68k/fpsp040/
 core-$(CONFIG_M68060)          += arch/m68k/ifpsp060/
 core-$(CONFIG_M68KFPU_EMU)     += arch/m68k/math-emu/
 core-$(CONFIG_M68360)          += arch/m68k/platform/68360/
-core-$(CONFIG_M68000)          += arch/m68k/platform/68328/
-core-$(CONFIG_M68EZ328)                += arch/m68k/platform/68EZ328/
-core-$(CONFIG_M68VZ328)                += arch/m68k/platform/68VZ328/
+core-$(CONFIG_M68000)          += arch/m68k/platform/68000/
 core-$(CONFIG_COLDFIRE)                += arch/m68k/platform/coldfire/
 
 
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
deleted file mode 100644 (file)
index fdf45e6..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-/****************************************************************************/
-
-/*
- *     m5249sim.h -- ColdFire 5249 System Integration Module support.
- *
- *     (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- */
-
-/****************************************************************************/
-#ifndef        m5249sim_h
-#define        m5249sim_h
-/****************************************************************************/
-
-#define        CPU_NAME                "COLDFIRE(m5249)"
-#define        CPU_INSTR_PER_JIFFY     3
-#define        MCF_BUSCLK              (MCF_CLK / 2)
-
-#include <asm/m52xxacr.h>
-
-/*
- *     The 5249 has a second MBAR region, define its address.
- */
-#define MCF_MBAR2              0x80000000
-
-/*
- *     Define the 5249 SIM register set addresses.
- */
-#define        MCFSIM_RSR              (MCF_MBAR + 0x00)       /* Reset Status */
-#define        MCFSIM_SYPCR            (MCF_MBAR + 0x01)       /* System Protection */
-#define        MCFSIM_SWIVR            (MCF_MBAR + 0x02)       /* SW Watchdog intr */
-#define        MCFSIM_SWSR             (MCF_MBAR + 0x03)       /* SW Watchdog srv */
-#define        MCFSIM_PAR              (MCF_MBAR + 0x04)       /* Pin Assignment */
-#define        MCFSIM_IRQPAR           (MCF_MBAR + 0x06)       /* Intr Assignment */
-#define        MCFSIM_MPARK            (MCF_MBAR + 0x0C)       /* BUS Master Ctrl */
-#define        MCFSIM_IPR              (MCF_MBAR + 0x40)       /* Interrupt Pending */
-#define        MCFSIM_IMR              (MCF_MBAR + 0x44)       /* Interrupt Mask */
-#define        MCFSIM_AVR              (MCF_MBAR + 0x4b)       /* Autovector Ctrl */
-#define        MCFSIM_ICR0             (MCF_MBAR + 0x4c)       /* Intr Ctrl reg 0 */
-#define        MCFSIM_ICR1             (MCF_MBAR + 0x4d)       /* Intr Ctrl reg 1 */
-#define        MCFSIM_ICR2             (MCF_MBAR + 0x4e)       /* Intr Ctrl reg 2 */
-#define        MCFSIM_ICR3             (MCF_MBAR + 0x4f)       /* Intr Ctrl reg 3 */
-#define        MCFSIM_ICR4             (MCF_MBAR + 0x50)       /* Intr Ctrl reg 4 */
-#define        MCFSIM_ICR5             (MCF_MBAR + 0x51)       /* Intr Ctrl reg 5 */
-#define        MCFSIM_ICR6             (MCF_MBAR + 0x52)       /* Intr Ctrl reg 6 */
-#define        MCFSIM_ICR7             (MCF_MBAR + 0x53)       /* Intr Ctrl reg 7 */
-#define        MCFSIM_ICR8             (MCF_MBAR + 0x54)       /* Intr Ctrl reg 8 */
-#define        MCFSIM_ICR9             (MCF_MBAR + 0x55)       /* Intr Ctrl reg 9 */
-#define        MCFSIM_ICR10            (MCF_MBAR + 0x56)       /* Intr Ctrl reg 10 */
-#define        MCFSIM_ICR11            (MCF_MBAR + 0x57)       /* Intr Ctrl reg 11 */
-
-#define        MCFSIM_CSAR0            (MCF_MBAR + 0x80)       /* CS 0 Address reg */
-#define        MCFSIM_CSMR0            (MCF_MBAR + 0x84)       /* CS 0 Mask reg */
-#define        MCFSIM_CSCR0            (MCF_MBAR + 0x8a)       /* CS 0 Control reg */
-#define        MCFSIM_CSAR1            (MCF_MBAR + 0x8c)       /* CS 1 Address reg */
-#define        MCFSIM_CSMR1            (MCF_MBAR + 0x90)       /* CS 1 Mask reg */
-#define        MCFSIM_CSCR1            (MCF_MBAR + 0x96)       /* CS 1 Control reg */
-#define        MCFSIM_CSAR2            (MCF_MBAR + 0x98)       /* CS 2 Address reg */
-#define        MCFSIM_CSMR2            (MCF_MBAR + 0x9c)       /* CS 2 Mask reg */
-#define        MCFSIM_CSCR2            (MCF_MBAR + 0xa2)       /* CS 2 Control reg */
-#define        MCFSIM_CSAR3            (MCF_MBAR + 0xa4)       /* CS 3 Address reg */
-#define        MCFSIM_CSMR3            (MCF_MBAR + 0xa8)       /* CS 3 Mask reg */
-#define        MCFSIM_CSCR3            (MCF_MBAR + 0xae)       /* CS 3 Control reg */
-
-#define MCFSIM_DCR             (MCF_MBAR + 0x100)      /* DRAM Control */
-#define MCFSIM_DACR0           (MCF_MBAR + 0x108)      /* DRAM 0 Addr/Ctrl */
-#define MCFSIM_DMR0            (MCF_MBAR + 0x10c)      /* DRAM 0 Mask */
-#define MCFSIM_DACR1           (MCF_MBAR + 0x110)      /* DRAM 1 Addr/Ctrl */
-#define MCFSIM_DMR1            (MCF_MBAR + 0x114)      /* DRAM 1 Mask */
-
-/*
- *     Timer module.
- */
-#define MCFTIMER_BASE1         (MCF_MBAR + 0x140)      /* Base of TIMER1 */
-#define MCFTIMER_BASE2         (MCF_MBAR + 0x180)      /* Base of TIMER2 */
-
-/*
- *     UART module.
- */
-#define MCFUART_BASE0          (MCF_MBAR + 0x1c0)      /* Base address UART0 */
-#define MCFUART_BASE1          (MCF_MBAR + 0x200)      /* Base address UART1 */
-
-/*
- *     QSPI module.
- */
-#define        MCFQSPI_BASE            (MCF_MBAR + 0x300)      /* Base address QSPI */
-#define        MCFQSPI_SIZE            0x40                    /* Register set size */
-
-#define        MCFQSPI_CS0             29
-#define        MCFQSPI_CS1             24
-#define        MCFQSPI_CS2             21
-#define        MCFQSPI_CS3             22
-
-/*
- *     DMA unit base addresses.
- */
-#define MCFDMA_BASE0           (MCF_MBAR + 0x300)      /* Base address DMA 0 */
-#define MCFDMA_BASE1           (MCF_MBAR + 0x340)      /* Base address DMA 1 */
-#define MCFDMA_BASE2           (MCF_MBAR + 0x380)      /* Base address DMA 2 */
-#define MCFDMA_BASE3           (MCF_MBAR + 0x3C0)      /* Base address DMA 3 */
-
-/*
- *     Some symbol defines for the above...
- */
-#define        MCFSIM_SWDICR           MCFSIM_ICR0     /* Watchdog timer ICR */
-#define        MCFSIM_TIMER1ICR        MCFSIM_ICR1     /* Timer 1 ICR */
-#define        MCFSIM_TIMER2ICR        MCFSIM_ICR2     /* Timer 2 ICR */
-#define        MCFSIM_UART1ICR         MCFSIM_ICR4     /* UART 1 ICR */
-#define        MCFSIM_UART2ICR         MCFSIM_ICR5     /* UART 2 ICR */
-#define        MCFSIM_DMA0ICR          MCFSIM_ICR6     /* DMA 0 ICR */
-#define        MCFSIM_DMA1ICR          MCFSIM_ICR7     /* DMA 1 ICR */
-#define        MCFSIM_DMA2ICR          MCFSIM_ICR8     /* DMA 2 ICR */
-#define        MCFSIM_DMA3ICR          MCFSIM_ICR9     /* DMA 3 ICR */
-#define        MCFSIM_QSPIICR          MCFSIM_ICR10    /* QSPI ICR */
-
-/*
- *     Define system peripheral IRQ usage.
- */
-#define        MCF_IRQ_QSPI            28              /* QSPI, Level 4 */
-#define        MCF_IRQ_TIMER           30              /* Timer0, Level 6 */
-#define        MCF_IRQ_PROFILER        31              /* Timer1, Level 7 */
-
-#define        MCF_IRQ_UART0           73              /* UART0 */
-#define        MCF_IRQ_UART1           74              /* UART1 */
-
-/*
- *     General purpose IO registers (in MBAR2).
- */
-#define        MCFSIM2_GPIOREAD        (MCF_MBAR2 + 0x000)     /* GPIO read values */
-#define        MCFSIM2_GPIOWRITE       (MCF_MBAR2 + 0x004)     /* GPIO write values */
-#define        MCFSIM2_GPIOENABLE      (MCF_MBAR2 + 0x008)     /* GPIO enabled */
-#define        MCFSIM2_GPIOFUNC        (MCF_MBAR2 + 0x00C)     /* GPIO function */
-#define        MCFSIM2_GPIO1READ       (MCF_MBAR2 + 0x0B0)     /* GPIO1 read values */
-#define        MCFSIM2_GPIO1WRITE      (MCF_MBAR2 + 0x0B4)     /* GPIO1 write values */
-#define        MCFSIM2_GPIO1ENABLE     (MCF_MBAR2 + 0x0B8)     /* GPIO1 enabled */
-#define        MCFSIM2_GPIO1FUNC       (MCF_MBAR2 + 0x0BC)     /* GPIO1 function */
-
-#define        MCFSIM2_GPIOINTSTAT     (MCF_MBAR2 + 0xc0)      /* GPIO intr status */
-#define        MCFSIM2_GPIOINTCLEAR    (MCF_MBAR2 + 0xc0)      /* GPIO intr clear */
-#define        MCFSIM2_GPIOINTENABLE   (MCF_MBAR2 + 0xc4)      /* GPIO intr enable */
-
-#define        MCFSIM2_INTLEVEL1       (MCF_MBAR2 + 0x140)     /* Intr level reg 1 */
-#define        MCFSIM2_INTLEVEL2       (MCF_MBAR2 + 0x144)     /* Intr level reg 2 */
-#define        MCFSIM2_INTLEVEL3       (MCF_MBAR2 + 0x148)     /* Intr level reg 3 */
-#define        MCFSIM2_INTLEVEL4       (MCF_MBAR2 + 0x14c)     /* Intr level reg 4 */
-#define        MCFSIM2_INTLEVEL5       (MCF_MBAR2 + 0x150)     /* Intr level reg 5 */
-#define        MCFSIM2_INTLEVEL6       (MCF_MBAR2 + 0x154)     /* Intr level reg 6 */
-#define        MCFSIM2_INTLEVEL7       (MCF_MBAR2 + 0x158)     /* Intr level reg 7 */
-#define        MCFSIM2_INTLEVEL8       (MCF_MBAR2 + 0x15c)     /* Intr level reg 8 */
-
-#define        MCFSIM2_DMAROUTE        (MCF_MBAR2 + 0x188)     /* DMA routing */
-
-#define        MCFSIM2_IDECONFIG1      (MCF_MBAR2 + 0x18c)     /* IDEconfig1 */
-#define        MCFSIM2_IDECONFIG2      (MCF_MBAR2 + 0x190)     /* IDEconfig2 */
-
-/*
- * Define the base interrupt for the second interrupt controller.
- * We set it to 128, out of the way of the base interrupts, and plenty
- * of room for its 64 interrupts.
- */
-#define        MCFINTC2_VECBASE        128
-
-#define        MCFINTC2_GPIOIRQ0       (MCFINTC2_VECBASE + 32)
-#define        MCFINTC2_GPIOIRQ1       (MCFINTC2_VECBASE + 33)
-#define        MCFINTC2_GPIOIRQ2       (MCFINTC2_VECBASE + 34)
-#define        MCFINTC2_GPIOIRQ3       (MCFINTC2_VECBASE + 35)
-#define        MCFINTC2_GPIOIRQ4       (MCFINTC2_VECBASE + 36)
-#define        MCFINTC2_GPIOIRQ5       (MCFINTC2_VECBASE + 37)
-#define        MCFINTC2_GPIOIRQ6       (MCFINTC2_VECBASE + 38)
-#define        MCFINTC2_GPIOIRQ7       (MCFINTC2_VECBASE + 39)
-
-/*
- * Generic GPIO support
- */
-#define MCFGPIO_PIN_MAX                64
-#define MCFGPIO_IRQ_MAX                -1
-#define MCFGPIO_IRQ_VECBASE    -1
-
-/****************************************************************************/
-
-#ifdef __ASSEMBLER__
-
-/*
- *     The M5249C3 board needs a little help getting all its SIM devices
- *     initialized at kernel start time. dBUG doesn't set much up, so
- *     we need to do it manually.
- */
-.macro m5249c3_setup
-       /*
-        *      Set MBAR1 and MBAR2, just incase they are not set.
-        */
-       movel   #0x10000001,%a0
-       movec   %a0,%MBAR                       /* map MBAR region */
-       subql   #1,%a0                          /* get MBAR address in a0 */
-
-       movel   #0x80000001,%a1
-       movec   %a1,#3086                       /* map MBAR2 region */
-       subql   #1,%a1                          /* get MBAR2 address in a1 */
-
-       /*
-        *      Move secondary interrupts to their base (128).
-        */
-       moveb   #MCFINTC2_VECBASE,%d0
-       moveb   %d0,0x16b(%a1)                  /* interrupt base register */
-
-       /*
-        *      Work around broken CSMR0/DRAM vector problem.
-        */
-       movel   #0x001F0021,%d0                 /* disable C/I bit */
-       movel   %d0,0x84(%a0)                   /* set CSMR0 */
-
-       /*
-        *      Disable the PLL firstly. (Who knows what state it is
-        *      in here!).
-        */
-       movel   0x180(%a1),%d0                  /* get current PLL value */
-       andl    #0xfffffffe,%d0                 /* PLL bypass first */
-       movel   %d0,0x180(%a1)                  /* set PLL register */
-       nop
-
-#if CONFIG_CLOCK_FREQ == 140000000
-       /*
-        *      Set initial clock frequency. This assumes M5249C3 board
-        *      is fitted with 11.2896MHz crystal. It will program the
-        *      PLL for 140MHz. Lets go fast :-)
-        */
-       movel   #0x125a40f0,%d0                 /* set for 140MHz */
-       movel   %d0,0x180(%a1)                  /* set PLL register */
-       orl     #0x1,%d0
-       movel   %d0,0x180(%a1)                  /* set PLL register */
-#endif
-
-       /*
-        *      Setup CS1 for ethernet controller.
-        *      (Setup as per M5249C3 doco).
-        */
-       movel  #0xe0000000,%d0                  /* CS1 mapped at 0xe0000000 */
-       movel  %d0,0x8c(%a0)
-       movel  #0x001f0021,%d0                  /* CS1 size of 1Mb */
-       movel  %d0,0x90(%a0)
-       movew  #0x0080,%d0                      /* CS1 = 16bit port, AA */
-       movew  %d0,0x96(%a0)
-
-       /*
-        *      Setup CS2 for IDE interface.
-        */
-       movel   #0x50000000,%d0                 /* CS2 mapped at 0x50000000 */
-       movel   %d0,0x98(%a0)
-       movel   #0x001f0001,%d0                 /* CS2 size of 1MB */
-       movel   %d0,0x9c(%a0)
-       movew   #0x0080,%d0                     /* CS2 = 16bit, TA */
-       movew   %d0,0xa2(%a0)
-
-       movel   #0x00107000,%d0                 /* IDEconfig1 */
-       movel   %d0,0x18c(%a1)
-       movel   #0x000c0400,%d0                 /* IDEconfig2 */
-       movel   %d0,0x190(%a1)
-
-       movel   #0x00080000,%d0                 /* GPIO19, IDE reset bit */
-       orl     %d0,0xc(%a1)                    /* function GPIO19 */
-       orl     %d0,0x8(%a1)                    /* enable GPIO19 as output */
-        orl    %d0,0x4(%a1)                    /* de-assert IDE reset */
-.endm
-
-#define        PLATFORM_SETUP  m5249c3_setup
-
-#endif /* __ASSEMBLER__ */
-
-/****************************************************************************/
-#endif /* m5249sim_h */
index acab61c..e33f5bb 100644 (file)
 #define m525xsim_h
 /****************************************************************************/
 
+/*
+ *     This header supports ColdFire 5249, 5251 and 5253. There are a few
+ *     little differences between them, but most of the peripheral support
+ *     can be used by all of them.
+ */
 #define CPU_NAME               "COLDFIRE(m525x)"
 #define CPU_INSTR_PER_JIFFY    3
 #define MCF_BUSCLK             (MCF_CLK / 2)
@@ -65,6 +70,8 @@
 #define MCFSIM_DCR             (MCF_MBAR + 0x100)      /* DRAM Control */
 #define MCFSIM_DACR0           (MCF_MBAR + 0x108)      /* DRAM 0 Addr/Ctrl */
 #define MCFSIM_DMR0            (MCF_MBAR + 0x10c)      /* DRAM 0 Mask */
+#define MCFSIM_DACR1           (MCF_MBAR + 0x110)      /* DRAM 1 Addr/Ctrl */
+#define MCFSIM_DMR1            (MCF_MBAR + 0x114)      /* DRAM 1 Mask */
 
 /*
  * Secondary Interrupt Controller (in MBAR2)
 #define MCFQSPI_BASE           (MCF_MBAR + 0x300)      /* Base address QSPI */
 #define MCFQSPI_SIZE           0x40                    /* Register set size */
 
-
+#ifdef CONFIG_M5249
+#define MCFQSPI_CS0            29
+#define MCFQSPI_CS1            24
+#define MCFQSPI_CS2            21
+#define MCFQSPI_CS3            22
+#else
 #define MCFQSPI_CS0            15
 #define MCFQSPI_CS1            16
 #define MCFQSPI_CS2            24
 #define MCFQSPI_CS3            28
+#endif
 
 /*
  *     I2C module.
 
 #define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base address I2C1 */
 #define MCFI2C_SIZE1           0x20                    /* Register set size */
+
 /*
  *     DMA unit base addresses.
  */
 #define MCF_IRQ_GPIO4          (MCFINTC2_VECBASE + 36)
 #define MCF_IRQ_GPIO5          (MCFINTC2_VECBASE + 37)
 #define MCF_IRQ_GPIO6          (MCFINTC2_VECBASE + 38)
+#define MCF_IRQ_GPIO7          (MCFINTC2_VECBASE + 39)
 
 #define MCF_IRQ_USBWUP         (MCFINTC2_VECBASE + 40)
 #define MCF_IRQ_I2C1           (MCFINTC2_VECBASE + 62)
 #define MCFSIM2_GPIOINTCLEAR   (MCF_MBAR2 + 0xc0)      /* GPIO intr clear */
 #define MCFSIM2_GPIOINTENABLE  (MCF_MBAR2 + 0xc4)      /* GPIO intr enable */
 
+#define MCFSIM2_DMAROUTE       (MCF_MBAR2 + 0x188)     /* DMA routing */
+#define MCFSIM2_IDECONFIG1     (MCF_MBAR2 + 0x18c)     /* IDEconfig1 */
+#define MCFSIM2_IDECONFIG2     (MCF_MBAR2 + 0x190)     /* IDEconfig2 */
+
 /*
  * Generic GPIO support
  */
 #define MCFGPIO_PIN_MAX                64
+#ifdef CONFIG_M5249
+#define MCFGPIO_IRQ_MAX                -1
+#define MCFGPIO_IRQ_VECBASE    -1
+#else
 #define MCFGPIO_IRQ_MAX                7
 #define MCFGPIO_IRQ_VECBASE    MCF_IRQ_GPIO0
+#endif
+
+/****************************************************************************/
+
+#ifdef __ASSEMBLER__
+#ifdef CONFIG_M5249C3
+/*
+ *     The M5249C3 board needs a little help getting all its SIM devices
+ *     initialized at kernel start time. dBUG doesn't set much up, so
+ *     we need to do it manually.
+ */
+.macro m5249c3_setup
+       /*
+        *      Set MBAR1 and MBAR2, just in case they are not set.
+        */
+       movel   #0x10000001,%a0
+       movec   %a0,%MBAR                       /* map MBAR region */
+       subql   #1,%a0                          /* get MBAR address in a0 */
+
+       movel   #0x80000001,%a1
+       movec   %a1,#3086                       /* map MBAR2 region */
+       subql   #1,%a1                          /* get MBAR2 address in a1 */
+
+       /*
+        *      Move secondary interrupts to their base (128).
+        */
+       moveb   #MCFINTC2_VECBASE,%d0
+       moveb   %d0,0x16b(%a1)                  /* interrupt base register */
+
+       /*
+        *      Work around broken CSMR0/DRAM vector problem.
+        */
+       movel   #0x001F0021,%d0                 /* disable C/I bit */
+       movel   %d0,0x84(%a0)                   /* set CSMR0 */
+
+       /*
+        *      Disable the PLL first. (Who knows what state it is
+        *      in here!).
+        */
+       movel   0x180(%a1),%d0                  /* get current PLL value */
+       andl    #0xfffffffe,%d0                 /* PLL bypass first */
+       movel   %d0,0x180(%a1)                  /* set PLL register */
+       nop
+
+#if CONFIG_CLOCK_FREQ == 140000000
+       /*
+        *      Set the initial clock frequency. This assumes the M5249C3
+        *      board is fitted with an 11.2896MHz crystal. It will program
+        *      the PLL for 140MHz. Let's go fast :-)
+        */
+       movel   #0x125a40f0,%d0                 /* set for 140MHz */
+       movel   %d0,0x180(%a1)                  /* set PLL register */
+       orl     #0x1,%d0
+       movel   %d0,0x180(%a1)                  /* set PLL register */
+#endif
+
+       /*
+        *      Setup CS1 for ethernet controller.
+        *      (Setup as per M5249C3 doco).
+        */
+       movel  #0xe0000000,%d0                  /* CS1 mapped at 0xe0000000 */
+       movel  %d0,0x8c(%a0)
+       movel  #0x001f0021,%d0                  /* CS1 size of 1Mb */
+       movel  %d0,0x90(%a0)
+       movew  #0x0080,%d0                      /* CS1 = 16bit port, AA */
+       movew  %d0,0x96(%a0)
+
+       /*
+        *      Setup CS2 for IDE interface.
+        */
+       movel   #0x50000000,%d0                 /* CS2 mapped at 0x50000000 */
+       movel   %d0,0x98(%a0)
+       movel   #0x001f0001,%d0                 /* CS2 size of 1MB */
+       movel   %d0,0x9c(%a0)
+       movew   #0x0080,%d0                     /* CS2 = 16bit, TA */
+       movew   %d0,0xa2(%a0)
+
+       movel   #0x00107000,%d0                 /* IDEconfig1 */
+       movel   %d0,0x18c(%a1)
+       movel   #0x000c0400,%d0                 /* IDEconfig2 */
+       movel   %d0,0x190(%a1)
+
+       movel   #0x00080000,%d0                 /* GPIO19, IDE reset bit */
+       orl     %d0,0xc(%a1)                    /* function GPIO19 */
+       orl     %d0,0x8(%a1)                    /* enable GPIO19 as output */
+        orl    %d0,0x4(%a1)                    /* de-assert IDE reset */
+.endm
+
+#define        PLATFORM_SETUP  m5249c3_setup
 
+#endif /* CONFIG_M5249C3 */
+#endif /* __ASSEMBLER__ */
 /****************************************************************************/
 #endif /* m525xsim_h */
index b676a02..ea4791e 100644 (file)
@@ -8,7 +8,6 @@
 
 struct clk;
 
-#ifdef MCFPM_PPMCR0
 struct clk_ops {
        void (*enable)(struct clk *);
        void (*disable)(struct clk *);
@@ -23,6 +22,8 @@ struct clk {
 };
 
 extern struct clk *mcf_clks[];
+
+#ifdef MCFPM_PPMCR0
 extern struct clk_ops clk_ops0;
 #ifdef MCFPM_PPMCR1
 extern struct clk_ops clk_ops1;
@@ -38,6 +39,12 @@ static struct clk __clk_##clk_bank##_##clk_slot = { \
 
 void __clk_init_enabled(struct clk *);
 void __clk_init_disabled(struct clk *);
+#else
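+/* No PM clock hardware on this CPU: a clk is just a name and a fixed rate. */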
+#define DEFINE_CLK(clk_ref, clk_name, clk_rate) \
+        static struct clk clk_##clk_ref = { \
+                .name = clk_name, \
+                .rate = clk_rate, \
+        }
 #endif /* MCFPM_PPMCR0 */
 
 #endif /* mcfclk_h */
index 7a83e61..a04fd9b 100644 (file)
 #elif defined(CONFIG_M523x)
 #include <asm/m523xsim.h>
 #include <asm/mcfintc.h>
-#elif defined(CONFIG_M5249)
-#include <asm/m5249sim.h>
-#include <asm/mcfintc.h>
-#elif defined(CONFIG_M525x)
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
 #include <asm/m525xsim.h>
 #include <asm/mcfintc.h>
 #elif defined(CONFIG_M527x)
index 9059572..ef20916 100644 (file)
@@ -26,7 +26,7 @@ extern unsigned long memory_end;
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
 
 #define virt_to_page(addr)     (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page)     ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+#define page_to_virt(page)     __va(((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET))
 
 #define pfn_to_page(pfn)       virt_to_page(pfn_to_virt(pfn))
 #define page_to_pfn(page)      virt_to_pfn(page_to_virt(page))
index 10ca051..c1e2dfb 100644 (file)
@@ -10,7 +10,7 @@
 void *memcpy(void *to, const void *from, size_t n)
 {
        void *xto = to;
-       size_t temp, temp1;
+       size_t temp;
 
        if (!n)
                return xto;
@@ -47,6 +47,7 @@ void *memcpy(void *to, const void *from, size_t n)
                for (; temp; temp--)
                        *lto++ = *lfrom++;
 #else
+               size_t temp1;
                asm volatile (
                        "       movel %2,%3\n"
                        "       andw  #7,%3\n"
diff --git a/arch/m68k/platform/68000/Makefile b/arch/m68k/platform/68000/Makefile
new file mode 100644 (file)
index 0000000..1eab70c
--- /dev/null
@@ -0,0 +1,18 @@
+##################################################
+#
+# Makefile for 68000 core based CPUs
+#
+# 2012.10.21, Luis Alves <ljalvs@gmail.com>
+#             Merged all 68000 based CPUs' config
+#             files into a single directory.
+#
+
+# 68328, 68EZ328, 68VZ328
+
+obj-y                  += entry.o ints.o timers.o
+obj-$(CONFIG_M68328)   += m68328.o
+obj-$(CONFIG_M68EZ328) += m68EZ328.o
+obj-$(CONFIG_M68VZ328) += m68VZ328.o
+obj-$(CONFIG_ROM)      += romvec.o
+
+extra-y                := head.o
diff --git a/arch/m68k/platform/68000/head.S b/arch/m68k/platform/68000/head.S
new file mode 100644 (file)
index 0000000..536ef96
--- /dev/null
@@ -0,0 +1,240 @@
+/*
+ * head.S - Common startup code for 68000 core based CPUs
+ *
+ * 2012.10.21, Luis Alves <ljalvs@gmail.com>, Single head.S file for all
+ *             68000 core based CPUs. Based on the sources from:
+ *             Coldfire by Greg Ungerer <gerg@snapgear.com>
+ *             68328 by D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
+ *                      Kenneth Albanowski <kjahds@kjahds.com>,
+ *                      The Silver Hammer Group, Ltd.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+
+/*****************************************************************************
+ * UCSIMM and UCDIMM use CONFIG_MEMORY_RESERVE to reserve some RAM
+ *****************************************************************************/
+#ifdef CONFIG_MEMORY_RESERVE
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)-(CONFIG_MEMORY_RESERVE*0x100000)
+#else
+#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)
+#endif
+/*****************************************************************************/
+
+.global _start
+.global _rambase
+.global _ramvec
+.global _ramstart
+.global _ramend
+
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+.global bootlogo_bits
+#endif
+
+/* If DEBUG_HEAD_CODE is defined, the serial port in the 68x328 is initialized */
+/* #define DEBUG_HEAD_CODE */
+#undef DEBUG_HEAD_CODE
+
+.data
+
+/*****************************************************************************
+ * RAM setup pointers. Used by the kernel to determine RAM location and size.
+ *****************************************************************************/
+
+_rambase:
+       .long   0
+_ramvec:
+       .long   0
+_ramstart:
+       .long   0
+_ramend:
+       .long   0
+
+__HEAD
+
+/*****************************************************************************
+ * Entry point, where all begins!
+ *****************************************************************************/
+
+_start:
+
+/* The Pilot needs this specific signature at the start of ROM */
+#ifdef CONFIG_PILOT
+       .byte   0x4e, 0xfa, 0x00, 0x0a          /* bra opcode (jmp 10 bytes) */
+       .byte   'b', 'o', 'o', 't'
+       .word   10000
+       nop
+       moveq   #0, %d0
+       movew   %d0, 0xfffff618                 /* Watchdog off */
+       movel   #0x00011f07, 0xfffff114         /* CS A1 Mask */
+#endif /* CONFIG_PILOT */
+
+       movew   #0x2700, %sr                    /* disable all interrupts */
+
+/*****************************************************************************
+ * Set up the PLL and wait for it to settle (on 68x328 CPUs).
+ * Also, if enabled, initialize the serial port.
+ *****************************************************************************/
+#if defined(CONFIG_M68328) || \
+    defined(CONFIG_M68EZ328) || \
+    defined(CONFIG_M68VZ328)
+
+/* Serial port setup. Should only be needed if debugging this startup code. */
+#ifdef DEBUG_HEAD_CODE
+       movew   #0x0800, 0xfffff906             /* Ignore CTS */
+       movew   #0x010b, 0xfffff902             /* BAUD to 9600 */
+       movew   #0xe100, 0xfffff900             /* enable */
+#endif /* DEBUG_HEAD */
+
+#ifdef CONFIG_PILOT
+       movew   #0x2410, 0xfffff200             /* PLLCR */
+#else
+       movew   #0x2400, 0xfffff200             /* PLLCR */
+#endif
+       movew   #0x0123, 0xfffff202             /* PLLFSR */
+       moveq   #0, %d0
+       movew   #16384, %d0                     /* PLL settle wait loop */
+_pll_settle:
+       subw    #1, %d0
+       bne     _pll_settle
+#endif /* CONFIG_M68x328 */
+
+
+/*****************************************************************************
+ * If running kernel from ROM some specific initialization has to be done.
+ * (Assuming that everything is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_ROMKERNEL
+
+/*****************************************************************************
+ * Init chip registers (uCsimm specific)
+ *****************************************************************************/
+#ifdef CONFIG_UCSIMM
+       moveb   #0x00, 0xfffffb0b       /* Watchdog off */
+       moveb   #0x10, 0xfffff000       /* SCR */
+       moveb   #0x00, 0xfffff40b       /* enable chip select */
+       moveb   #0x00, 0xfffff423       /* enable /DWE */
+       moveb   #0x08, 0xfffffd0d       /* disable hardmap */
+       moveb   #0x07, 0xfffffd0e       /* level 7 interrupt clear */
+       movew   #0x8600, 0xfffff100     /* FLASH at 0x10c00000 */
+       movew   #0x018b, 0xfffff110     /* 2Meg, enable, 0ws */
+       movew   #0x8f00, 0xfffffc00     /* DRAM configuration */
+       movew   #0x9667, 0xfffffc02     /* DRAM control */
+       movew   #0x0000, 0xfffff106     /* DRAM at 0x00000000 */
+       movew   #0x068f, 0xfffff116     /* 8Meg, enable, 0ws */
+       moveb   #0x40, 0xfffff300       /* IVR */
+       movel   #0x007FFFFF, %d0        /* IMR */
+       movel   %d0, 0xfffff304
+       moveb   0xfffff42b, %d0
+       andb    #0xe0, %d0
+       moveb   %d0, 0xfffff42b
+#endif
+
+/*****************************************************************************
+ * Init LCD controller.
+ * (Assuming that LCD controller is already init'ed when running from RAM)
+ *****************************************************************************/
+#ifdef CONFIG_INIT_LCD
+#ifdef CONFIG_PILOT
+       moveb   #0, 0xfffffA27                  /* LCKCON */
+       movel   #_start, 0xfffffA00             /* LSSA */
+       moveb   #0xa, 0xfffffA05                /* LVPW */
+       movew   #0x9f, 0xFFFFFa08               /* LXMAX */
+       movew   #0x9f, 0xFFFFFa0a               /* LYMAX */
+       moveb   #9, 0xfffffa29                  /* LBAR */
+       moveb   #0, 0xfffffa25                  /* LPXCD */
+       moveb   #0x04, 0xFFFFFa20               /* LPICF */
+       moveb   #0x58, 0xfffffA27               /* LCKCON */
+       moveb   #0x85, 0xfffff429               /* PFDATA */
+       moveb   #0xd8, 0xfffffA27               /* LCKCON */
+       moveb   #0xc5, 0xfffff429               /* PFDATA */
+       moveb   #0xd5, 0xfffff429               /* PFDATA */
+       movel   #bootlogo_bits, 0xFFFFFA00      /* LSSA */
+       moveb   #10, 0xFFFFFA05                 /* LVPW */
+       movew   #160, 0xFFFFFA08                /* LXMAX */
+       movew   #160, 0xFFFFFA0A                /* LYMAX */
+#else /* CONFIG_PILOT */
+       movel   #bootlogo_bits, 0xfffffA00      /* LSSA */
+       moveb   #0x28, 0xfffffA05               /* LVPW */
+       movew   #0x280, 0xFFFFFa08              /* LXMAX */
+       movew   #0x1df, 0xFFFFFa0a              /* LYMAX */
+       moveb   #0, 0xfffffa29                  /* LBAR */
+       moveb   #0, 0xfffffa25                  /* LPXCD */
+       moveb   #0x08, 0xFFFFFa20               /* LPICF */
+       moveb   #0x01, 0xFFFFFA21               /* -ve pol */
+       moveb   #0x81, 0xfffffA27               /* LCKCON */
+       movew   #0xff00, 0xfffff412             /* LCD pins */
+#endif /* CONFIG_PILOT */
+#endif /* CONFIG_INIT_LCD */
+
+/*****************************************************************************
+ * Kernel is running from FLASH/ROM (XIP)
+ * Copy init text & data to RAM
+ *****************************************************************************/
+       moveal  #_etext, %a0
+       moveal  #_sdata, %a1
+       moveal  #__bss_start, %a2
+_copy_initmem:
+       movel   %a0@+, %a1@+
+       cmpal   %a1, %a2
+       bhi     _copy_initmem
+#endif /* CONFIG_ROMKERNEL */
+
+/*****************************************************************************
+ * Set up basic memory information for the kernel
+ *****************************************************************************/
+       movel   #CONFIG_VECTORBASE,_ramvec      /* set vector base location */
+       movel   #CONFIG_RAMBASE,_rambase        /* set the base of RAM */
+       movel   #RAMEND, _ramend                /* set end ram addr */
+       lea     __bss_stop,%a1
+       movel   %a1,_ramstart
+
+/*****************************************************************************
+ * If the kernel is in RAM, move romfs to right above bss and
+ * adjust _ramstart to where romfs ends.
+ *
+ * (Do this only if CONFIG_MTD_UCLINUX is true)
+ *****************************************************************************/
+
+#if defined(CONFIG_ROMFS_FS) && defined(CONFIG_RAMKERNEL) && \
+    defined(CONFIG_MTD_UCLINUX)
+       lea     __bss_start, %a0                /* get start of bss */
+       lea     __bss_stop, %a1                 /* set up destination  */
+       movel   %a0, %a2                        /* copy of bss start */
+
+       movel   8(%a0), %d0                     /* get size of ROMFS */
+       addql   #8, %d0                         /* allow for rounding */
+       andl    #0xfffffffc, %d0                /* whole words */
+
+       addl    %d0, %a0                        /* copy from end */
+       addl    %d0, %a1                        /* copy from end */
+       movel   %a1, _ramstart                  /* set start of ram */
+_copy_romfs:
+       movel   -(%a0), -(%a1)                  /* copy dword */
+       cmpl    %a0, %a2                        /* check if at end */
+       bne     _copy_romfs
+#endif /* CONFIG_ROMFS_FS && CONFIG_RAMKERNEL && CONFIG_MTD_UCLINUX */
+
+/*****************************************************************************
+ * Clear bss region
+ *****************************************************************************/
+       lea     __bss_start, %a0                /* get start of bss */
+       lea     __bss_stop, %a1                 /* get end of bss */
+_clear_bss:
+       movel   #0, (%a0)+                      /* clear each longword */
+       cmpl    %a0, %a1                        /* check if at end */
+       bne     _clear_bss
+
+/*****************************************************************************
+ * Load the current task pointer and stack.
+ *****************************************************************************/
+       lea     init_thread_union,%a0
+       lea     THREAD_SIZE(%a0),%sp
+       jsr     start_kernel                    /* start Linux kernel */
+_exit:
+       jmp     _exit                           /* should never get here */
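For orientation, the layout this startup code leaves behind can be restated in C. The sketch below is illustrative only and is not kernel code; the linker symbols match the ones referenced above, and the size field at offset 8 is the big-endian ROMFS length word the assembly reads with movel 8(%a0),%d0.

/* Illustrative C rendering of the RAM layout computed by head.S above. */
#include <stdint.h>

extern char __bss_start[], __bss_stop[];

static unsigned long compute_ramstart(void)
{
        unsigned long ramstart = (unsigned long)__bss_stop;
#if defined(CONFIG_ROMFS_FS) && defined(CONFIG_RAMKERNEL) && \
    defined(CONFIG_MTD_UCLINUX)
        /* The ROMFS image starts at __bss_start; bytes 8-11 hold its size. */
        uint32_t size = *(uint32_t *)(__bss_start + 8);
        size = (size + 8) & ~3U;        /* round up to whole longwords */
        ramstart += size;               /* free RAM begins after the moved romfs */
#endif
        return ramstart;
}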
similarity index 98%
rename from arch/m68k/platform/68328/ints.c
rename to arch/m68k/platform/68000/ints.c
index b3810fe..cda49b1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * linux/arch/m68knommu/platform/68328/ints.c
+ * ints.c - Generic interrupt controller support
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive
similarity index 97%
rename from arch/m68k/platform/68328/config.c
rename to arch/m68k/platform/68000/m68328.c
index 8c20e89..a86eb66 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *  linux/arch/m68knommu/platform/68328/config.c
+ *  m68328.c - 68328 specific config
  *
  *  Copyright (C) 1993 Hamish Macdonald
  *  Copyright (C) 1999 D. Jeff Dionne
similarity index 97%
rename from arch/m68k/platform/68EZ328/config.c
rename to arch/m68k/platform/68000/m68EZ328.c
index 4f158d5..a6eb72d 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *  linux/arch/m68knommu/platform/68EZ328/config.c
+ *  m68EZ328.c - 68EZ328 specific config
  *
  *  Copyright (C) 1993 Hamish Macdonald
  *  Copyright (C) 1999 D. Jeff Dionne
similarity index 98%
rename from arch/m68k/platform/68VZ328/config.c
rename to arch/m68k/platform/68000/m68VZ328.c
index 2ed8dc3..eb6964f 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *  linux/arch/m68knommu/platform/68VZ328/config.c
+ *  m68VZ328.c - 68VZ328 specific config
  *
  *  Copyright (C) 1993 Hamish Macdonald
  *  Copyright (C) 1999 D. Jeff Dionne
@@ -28,7 +28,7 @@
 #include <asm/bootstd.h>
 
 #ifdef CONFIG_INIT_LCD
-#include "bootlogo.h"
+#include "bootlogo-vz.h"
 #endif
 
 /***************************************************************************/
similarity index 94%
rename from arch/m68k/platform/68328/romvec.S
rename to arch/m68k/platform/68000/romvec.S
index 3108446..15c70cd 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * linux/arch/m68knommu/platform/68328/romvec.S
+ * romvec.S - Vector table for 68000 cpus
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive
similarity index 98%
rename from arch/m68k/platform/68328/timers.c
rename to arch/m68k/platform/68000/timers.c
index f4dc9b2..ec30acb 100644 (file)
@@ -1,7 +1,7 @@
 /***************************************************************************/
 
 /*
- *  linux/arch/m68knommu/platform/68328/timers.c
+ *  timers.c - Generic hardware timer support.
  *
  *  Copyright (C) 1993 Hamish Macdonald
  *  Copyright (C) 1999 D. Jeff Dionne
diff --git a/arch/m68k/platform/68328/Makefile b/arch/m68k/platform/68328/Makefile
deleted file mode 100644 (file)
index ee61bf8..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68328.
-#
-
-model-y                          := ram
-model-$(CONFIG_ROMKERNEL) := rom
-
-head-y                 = head-$(model-y).o
-head-$(CONFIG_PILOT)   = head-pilot.o
-head-$(CONFIG_DRAGEN2) = head-de2.o
-
-obj-y                  += entry.o ints.o timers.o
-obj-$(CONFIG_M68328)   += config.o
-obj-$(CONFIG_ROM)      += romvec.o
-
-extra-y                        := head.o
-
-$(obj)/head.o: $(obj)/$(head-y)
-       ln -sf $(head-y) $(obj)/head.o
-
-clean-files := $(obj)/head.o $(head-y)
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
deleted file mode 100644 (file)
index 537d324..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-
-#define        MEM_END 0x00800000      /* Memory size 8Mb */
-
-#undef CRT_DEBUG
-
-.macro PUTC CHAR
-#ifdef CRT_DEBUG
-       moveq   #\CHAR, %d7
-       jsr     putc
-#endif
-.endm
-
-       .global _start
-       .global _rambase
-       .global _ramvec
-       .global _ramstart
-       .global _ramend
-       
-       .data
-
-/*
- *     Set up the usable of RAM stuff
- */
-_rambase:
-       .long   0
-_ramvec:
-       .long   0
-_ramstart:
-       .long   0
-_ramend:
-       .long   0
-
-       .text
-
-_start:
-
-/*
- * Setup initial stack
- */
-       /* disable all interrupts */
-       movew   #0x2700, %sr
-       movel   #-1, 0xfffff304
-       movel   #MEM_END-4, %sp
-
-       PUTC    '\r'
-       PUTC    '\n'
-       PUTC    'A'
-       PUTC    'B'
-
-/*
- *     Determine end of RAM
- */
-
-       movel   #MEM_END, %a0
-       movel   %a0, _ramend
-
-       PUTC    'C'
-
-/*
- *     Move ROM filesystem above bss :-)
- */
-
-       moveal  #__bss_start, %a0               /* romfs at the start of bss */
-       moveal  #__bss_stop, %a1                /* Set up destination  */
-       movel   %a0, %a2                        /* Copy of bss start */
-
-       movel   8(%a0), %d1                     /* Get size of ROMFS */
-       addql   #8, %d1                         /* Allow for rounding */
-       andl    #0xfffffffc, %d1        /* Whole words */
-
-       addl    %d1, %a0                        /* Copy from end */
-       addl    %d1, %a1                        /* Copy from end */
-       movel   %a1, _ramstart          /* Set start of ram */
-
-1:
-       movel   -(%a0), %d0                     /* Copy dword */
-       movel   %d0, -(%a1)
-       cmpl    %a0, %a2                        /* Check if at end */
-       bne     1b
-
-       PUTC    'D'
-
-/*
- * Initialize BSS segment to 0
- */
-
-       lea     __bss_start, %a0
-       lea     __bss_stop, %a1
-
-       /* Copy 0 to %a0 until %a0 == %a1 */
-2:     cmpal   %a0, %a1
-       beq     1f
-       clrl    (%a0)+
-       bra     2b
-1:
-
-       PUTC    'E'
-
-/*
- * Load the current task pointer and stack
- */
-
-       lea     init_thread_union, %a0
-       lea     0x2000(%a0), %sp
-
-       PUTC    'F'
-       PUTC    '\r'
-       PUTC    '\n'
-
-/*
- * Go
- */
-
-       jmp     start_kernel
-
-/*
- * Local functions
- */
-#ifdef CRT_DEBUG
-putc:
-       moveb   %d7, 0xfffff907
-1:
-       movew   0xfffff906, %d7
-       andw    #0x2000, %d7
-       beq     1b
-       rts
-#endif
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
deleted file mode 100644 (file)
index 45a9dad..0000000
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * linux/arch/m68knommu/platform/68328/head-pilot.S
- * - A startup file for the MC68328
- *
- * Copyright (C) 1998  D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
- *                     Kenneth Albanowski <kjahds@kjahds.com>,
- *                     The Silver Hammer Group, Ltd.
- *
- * (c) 1995, Dionne & Associates
- * (c) 1995, DKG Display Tech.
- */
-
-#define ASSEMBLY
-
-#define IMMED #
-#define        DBG_PUTC(x)     moveb IMMED x, 0xfffff907
-
-
-.global _stext
-.global _start
-
-.global _rambase
-.global _ramvec
-.global _ramstart
-.global _ramend
-
-.global bootlogo_bits
-
-/*****************************************************************************/
-
-.data
-
-/*
- *      Set up the usable of RAM stuff. Size of RAM is determined then
- *      an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long   0
-_rambase:
-.long   0
-_ramstart:
-.long   0
-_ramend:
-.long   0
-
-.text
-       
-_start:
-_stext:
-
-
-#ifdef CONFIG_M68328
-
-#ifdef CONFIG_PILOT
-       .byte 0x4e, 0xfa, 0x00, 0x0a /* Jmp +X bytes */
-       .byte 'b', 'o', 'o', 't'
-       .word 10000
-
-       nop
-#endif
-
-       moveq   #0, %d0
-       movew   %d0, 0xfffff618 /* Watchdog off */
-       movel   #0x00011f07, 0xfffff114 /* CS A1 Mask */
-
-       movew   #0x0800, 0xfffff906 /* Ignore CTS */
-       movew   #0x010b, 0xfffff902 /* BAUD to 9600 */
-
-       movew   #0x2410, 0xfffff200 /* PLLCR */
-       movew   #0x123, 0xfffff202 /* PLLFSR */
-
-#ifdef CONFIG_PILOT
-       moveb   #0, 0xfffffA27 /* LCKCON */
-       movel   #_start, 0xfffffA00 /* LSSA */
-       moveb   #0xa, 0xfffffA05 /* LVPW */
-       movew   #0x9f, 0xFFFFFa08 /* LXMAX */
-       movew   #0x9f, 0xFFFFFa0a /* LYMAX */
-       moveb   #9, 0xfffffa29 /* LBAR */
-       moveb   #0, 0xfffffa25 /* LPXCD */
-       moveb   #0x04, 0xFFFFFa20 /* LPICF */
-       moveb   #0x58, 0xfffffA27 /* LCKCON */
-       moveb   #0x85, 0xfffff429 /* PFDATA */
-       moveb   #0xd8, 0xfffffA27 /* LCKCON */
-       moveb   #0xc5, 0xfffff429 /* PFDATA */
-       moveb   #0xd5, 0xfffff429 /* PFDATA */
-
-       moveal  #0x00100000, %a3
-       moveal  #0x100ffc00, %a4
-#endif /* CONFIG_PILOT */
-
-#endif /* CONFIG_M68328 */
-
-       movew   #0x2700, %sr
-       lea     %a4@(-4), %sp
-
-       DBG_PUTC('\r')
-       DBG_PUTC('\n')
-       DBG_PUTC('A')
-
-       moveq   #0,%d0
-       movew   #16384, %d0  /* PLL settle wait loop */
-L0:
-       subw    #1, %d0
-       bne     L0
-
-       DBG_PUTC('B')
-
-       /* Copy command line from beginning of RAM (+16) to end of bss */
-       movel   #CONFIG_VECTORBASE, %d7
-       addl    #16, %d7
-       moveal  %d7, %a0
-       moveal  #__bss_stop, %a1
-       lea     %a1@(512), %a2
-
-       DBG_PUTC('C')
-
-       /* Copy %a0 to %a1 until %a1 == %a2 */
-L2:
-       movel   %a0@+, %d0
-       movel   %d0, %a1@+
-       cmpal   %a1, %a2
-       bhi     L2
-
-       /* Copy data+init segment from ROM to RAM */
-       moveal  #_etext, %a0
-       moveal  #_sdata, %a1
-       moveal  #__init_end, %a2
-
-       DBG_PUTC('D')
-
-       /* Copy %a0 to %a1 until %a1 == %a2 */
-LD1:
-       movel   %a0@+, %d0
-       movel   %d0, %a1@+
-       cmpal   %a1, %a2
-       bhi     LD1
-
-       DBG_PUTC('E')
-
-       moveal  #__bss_start, %a0
-       moveal  #__bss_stop, %a1
-
-       /* Copy 0 to %a0 until %a0 == %a1 */
-L1:
-       movel   #0, %a0@+
-       cmpal   %a0, %a1
-       bhi     L1
-
-       DBG_PUTC('F')
-
-       /* Copy command line from end of bss to command line */
-       moveal  #__bss_stop, %a0
-       moveal  #command_line, %a1
-       lea     %a1@(512), %a2
-
-       DBG_PUTC('G')
-
-       /* Copy %a0 to %a1 until %a1 == %a2 */
-L3:
-       movel   %a0@+, %d0
-       movel   %d0, %a1@+
-       cmpal   %a1, %a2
-       bhi     L3
-
-       movel   #_sdata, %d0    
-       movel   %d0, _rambase   
-       movel   #__bss_stop, %d0
-       movel   %d0, _ramstart
-
-       movel   %a4, %d0
-       subl    #4096, %d0      /* Reserve 4K of stack */
-       moveq   #79, %d7
-       movel   %d0, _ramend
-
-       pea     0
-       pea     env
-       pea     %sp@(4)
-       pea     0
-
-       DBG_PUTC('H')
-
-#ifdef CONFIG_PILOT
-       movel   #bootlogo_bits, 0xFFFFFA00
-       moveb   #10, 0xFFFFFA05
-       movew   #160, 0xFFFFFA08
-       movew   #160, 0xFFFFFA0A
-#endif /* CONFIG_PILOT */
-
-       DBG_PUTC('I')
-
-       lea     init_thread_union, %a0
-       lea     0x2000(%a0), %sp
-
-       DBG_PUTC('J')
-       DBG_PUTC('\r')
-       DBG_PUTC('\n')
-
-       jsr     start_kernel
-_exit:
-
-       jmp     _exit
-
-
-       .data
-env:
-       .long   0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
deleted file mode 100644 (file)
index 5189ef9..0000000
+++ /dev/null
@@ -1,141 +0,0 @@
-
-       .global __main
-       .global __rom_start
-
-        .global _rambase
-        .global _ramstart
-       
-       .global splash_bits
-       .global _start
-       .global _stext
-       .global _edata
-
-#define DEBUG
-#define ROM_OFFSET 0x10C00000
-#define STACK_GAURD 0x10
-
-       .text
-       
-_start:
-_stext:
-       movew   #0x2700, %sr            /* Exceptions off! */
-
-#if 0
-       /* Init chip registers.  uCsimm specific */
-       moveb   #0x00,   0xfffffb0b     /* Watchdog off */
-       moveb   #0x10,   0xfffff000     /* SCR */
-
-       movew   #0x2400, 0xfffff200     /* PLLCR */
-       movew   #0x0123, 0xfffff202     /* PLLFSR */
-
-       moveb   #0x00,   0xfffff40b     /* enable chip select */
-       moveb   #0x00,   0xfffff423     /* enable /DWE */
-       moveb   #0x08,   0xfffffd0d     /* disable hardmap */
-       moveb   #0x07,   0xfffffd0e     /* level 7 interrupt clear */
-
-       movew   #0x8600, 0xfffff100     /* FLASH at 0x10c00000 */
-       movew   #0x018b, 0xfffff110     /* 2Meg, enable, 0ws */
-
-       movew   #0x8f00, 0xfffffc00     /* DRAM configuration */
-       movew   #0x9667, 0xfffffc02     /* DRAM control */
-       movew   #0x0000, 0xfffff106     /* DRAM at 0x00000000 */
-       movew   #0x068f, 0xfffff116     /* 8Meg, enable, 0ws */
-
-       moveb   #0x40,   0xfffff300     /* IVR */
-       movel   #0x007FFFFF, %d0        /* IMR */
-       movel   %d0,     0xfffff304
-
-       moveb   0xfffff42b, %d0
-       andb    #0xe0,   %d0
-       moveb   %d0,     0xfffff42b
-
-       moveb   #0x08,   0xfffff907     /* Ignore CTS */
-       movew   #0x010b, 0xfffff902     /* BAUD to 9600 */
-       movew   #0xe100, 0xfffff900     /* enable */
-#endif
-
-       movew   #16384, %d0  /* PLL settle wait loop */
-L0:
-       subw    #1, %d0
-       bne     L0
-#ifdef DEBUG
-       moveq   #70, %d7                /* 'F' */
-       moveb   %d7,0xfffff907          /* No absolute addresses */
-pclp1:
-       movew   0xfffff906, %d7
-       andw    #0x2000, %d7
-       beq     pclp1
-#endif /* DEBUG */
-
-#ifdef DEBUG
-       moveq   #82, %d7                /* 'R' */
-       moveb   %d7,0xfffff907          /* No absolute addresses */
-pclp3:
-       movew   0xfffff906, %d7
-       andw    #0x2000, %d7
-       beq     pclp3
-#endif /* DEBUG */
-       moveal  #0x007ffff0, %ssp
-       moveal  #__bss_start, %a0
-       moveal  #__bss_stop, %a1
-
-       /* Copy 0 to %a0 until %a0 >= %a1 */
-L1:
-       movel   #0, %a0@+
-       cmpal   %a0, %a1
-       bhi     L1
-
-#ifdef DEBUG
-       moveq   #67, %d7                /* 'C' */
-       jsr     putc
-#endif /* DEBUG */
-
-       pea     0
-       pea     env
-       pea     %sp@(4)
-       pea     0
-
-#ifdef DEBUG
-       moveq   #70, %d7                /* 'F' */
-       jsr     putc
-#endif /* DEBUG */
-
-lp:
-       jsr     start_kernel
-        jmp lp
-_exit:
-
-       jmp     _exit
-
-__main:
-       /* nothing */
-       rts
-
-#ifdef DEBUG
-putc:
-       moveb   %d7,0xfffff907
-pclp:
-       movew   0xfffff906, %d7
-       andw    #0x2000, %d7
-       beq     pclp
-       rts
-#endif /* DEBUG */
-
-       .data
-
-/*
- *      Set up the usable of RAM stuff. Size of RAM is determined then
- *      an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long   0
-_rambase:
-.long   0
-_ramstart:
-.long   0
-_ramend:
-.long   0
-
-env:
-       .long   0
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
deleted file mode 100644 (file)
index 3dff98b..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-       
-       .global _start
-       .global _stext
-
-       .global _rambase
-       .global _ramvec
-       .global _ramstart
-       .global _ramend
-
-#ifdef CONFIG_INIT_LCD
-       .global bootlogo_bits
-#endif
-
-       .data
-
-/*
- *      Set up the usable of RAM stuff. Size of RAM is determined then
- *      an initial stack set up at the end.
- */
-.align 4
-_ramvec:
-.long   0
-_rambase:
-.long   0
-_ramstart:
-.long   0
-_ramend:
-.long   0
-
-#define        RAMEND  (CONFIG_RAMBASE + CONFIG_RAMSIZE)
-
-       .text
-_start:
-_stext:        movew   #0x2700,%sr
-#ifdef CONFIG_INIT_LCD
-       movel   #bootlogo_bits, 0xfffffA00 /* LSSA */
-       moveb   #0x28,   0xfffffA05     /* LVPW */
-       movew   #0x280,  0xFFFFFa08     /* LXMAX */
-       movew   #0x1df,  0xFFFFFa0a     /* LYMAX */
-       moveb   #0,      0xfffffa29     /* LBAR */
-       moveb   #0,      0xfffffa25     /* LPXCD */
-       moveb   #0x08,   0xFFFFFa20     /* LPICF */
-       moveb   #0x01,   0xFFFFFA21     /* -ve pol */
-       moveb   #0x81,   0xfffffA27     /* LCKCON */
-       movew   #0xff00, 0xfffff412     /* LCD pins */
-#endif
-       moveal  #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
-       movew   #32767, %d0  /* PLL settle wait loop */
-1:     subq    #1, %d0
-       bne     1b
-
-       /* Copy data segment from ROM to RAM */
-       moveal  #_etext, %a0
-       moveal  #_sdata, %a1
-       moveal  #_edata, %a2
-
-       /* Copy %a0 to %a1 until %a1 == %a2 */
-1:     movel   %a0@+, %a1@+
-       cmpal   %a1, %a2
-       bhi     1b
-
-       moveal  #__bss_start, %a0
-       moveal  #__bss_stop, %a1
-       /* Copy 0 to %a0 until %a0 == %a1 */
-       
-1:
-       clrl    %a0@+
-       cmpal   %a0, %a1
-       bhi     1b
-
-        movel   #_sdata, %d0    
-        movel   %d0, _rambase        
-        movel   #__bss_stop, %d0
-        movel   %d0, _ramstart
-       movel   #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
-       movel   %d0, _ramend
-       movel   #CONFIG_VECTORBASE,     %d0
-       movel   %d0, _ramvec
-       
-/*
- * load the current task pointer and stack
- */
-       lea     init_thread_union, %a0
-       lea     0x2000(%a0), %sp
-
-1:     jsr     start_kernel
-        bra 1b
-_exit:
-
-       jmp     _exit
-
-
-putc:
-       moveb   %d7,0xfffff907
-1:
-       movew   0xfffff906, %d7
-       andw    #0x2000, %d7
-       beq     1b
-       rts
-
-       .data
-env:
-       .long   0
-       .text
-
diff --git a/arch/m68k/platform/68EZ328/Makefile b/arch/m68k/platform/68EZ328/Makefile
deleted file mode 100644 (file)
index b44d799..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68knommu/platform/68EZ328.
-#
-
-obj-y := config.o
diff --git a/arch/m68k/platform/68VZ328/Makefile b/arch/m68k/platform/68VZ328/Makefile
deleted file mode 100644 (file)
index 8166741..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for arch/m68k/platform/68VZ328.
-#
-
-obj-y          := config.o
index 9cd13b4..fddfdcc 100644 (file)
 #include <asm/mcfsim.h>
 #include <asm/mcfclk.h>
 
-/***************************************************************************/
-#ifndef MCFPM_PPMCR0
-struct clk *clk_get(struct device *dev, const char *id)
+static DEFINE_SPINLOCK(clk_lock);
+
+#ifdef MCFPM_PPMCR0
+/*
+ *     For the more advanced ColdFire parts that have gateable clocks we
+ *     supply enable/disable functions. Such platforms must define their
+ *     clocks properly in their platform-specific code.
+ */
+void __clk_init_enabled(struct clk *clk)
 {
-       return NULL;
+       clk->enabled = 1;
+       clk->clk_ops->enable(clk);
 }
-EXPORT_SYMBOL(clk_get);
 
-int clk_enable(struct clk *clk)
+void __clk_init_disabled(struct clk *clk)
 {
-       return 0;
+       clk->enabled = 0;
+       clk->clk_ops->disable(clk);
 }
-EXPORT_SYMBOL(clk_enable);
 
-void clk_disable(struct clk *clk)
+static void __clk_enable0(struct clk *clk)
 {
+       __raw_writeb(clk->slot, MCFPM_PPMCR0);
 }
-EXPORT_SYMBOL(clk_disable);
 
-void clk_put(struct clk *clk)
+static void __clk_disable0(struct clk *clk)
+{
+       __raw_writeb(clk->slot, MCFPM_PPMSR0);
+}
+
+struct clk_ops clk_ops0 = {
+       .enable         = __clk_enable0,
+       .disable        = __clk_disable0,
+};
+
+#ifdef MCFPM_PPMCR1
+static void __clk_enable1(struct clk *clk)
 {
+       __raw_writeb(clk->slot, MCFPM_PPMCR1);
 }
-EXPORT_SYMBOL(clk_put);
 
-unsigned long clk_get_rate(struct clk *clk)
+static void __clk_disable1(struct clk *clk)
 {
-       return MCF_CLK;
+       __raw_writeb(clk->slot, MCFPM_PPMSR1);
 }
-EXPORT_SYMBOL(clk_get_rate);
-#else
-static DEFINE_SPINLOCK(clk_lock);
+
+struct clk_ops clk_ops1 = {
+       .enable         = __clk_enable1,
+       .disable        = __clk_disable1,
+};
+#endif /* MCFPM_PPMCR1 */
+#endif /* MCFPM_PPMCR0 */
 
 struct clk *clk_get(struct device *dev, const char *id)
 {
@@ -101,48 +122,3 @@ unsigned long clk_get_rate(struct clk *clk)
 EXPORT_SYMBOL(clk_get_rate);
 
 /***************************************************************************/
-
-void __clk_init_enabled(struct clk *clk)
-{
-       clk->enabled = 1;
-       clk->clk_ops->enable(clk);
-}
-
-void __clk_init_disabled(struct clk *clk)
-{
-       clk->enabled = 0;
-       clk->clk_ops->disable(clk);
-}
-
-static void __clk_enable0(struct clk *clk)
-{
-       __raw_writeb(clk->slot, MCFPM_PPMCR0);
-}
-
-static void __clk_disable0(struct clk *clk)
-{
-       __raw_writeb(clk->slot, MCFPM_PPMSR0);
-}
-
-struct clk_ops clk_ops0 = {
-       .enable         = __clk_enable0,
-       .disable        = __clk_disable0,
-};
-
-#ifdef MCFPM_PPMCR1
-static void __clk_enable1(struct clk *clk)
-{
-       __raw_writeb(clk->slot, MCFPM_PPMCR1);
-}
-
-static void __clk_disable1(struct clk *clk)
-{
-       __raw_writeb(clk->slot, MCFPM_PPMSR1);
-}
-
-struct clk_ops clk_ops1 = {
-       .enable         = __clk_enable1,
-       .disable        = __clk_disable1,
-};
-#endif /* MCFPM_PPMCR1 */
-#endif /* MCFPM_PPMCR0 */
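With this reordering, __clk_init_enabled()/__clk_init_disabled() and the slot-based clk_ops are defined before the generic clk_get()/clk_enable() bodies that follow. From a driver's point of view nothing changes; a minimal consumer sketch (hypothetical driver, using the "mcfuart.0" clock the platform tables below define):

/* Hypothetical consumer of the ColdFire clk tables (sketch only). */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_uart_clk_setup(struct device *dev)
{
        struct clk *clk;

        clk = clk_get(dev, "mcfuart.0");        /* name from DEFINE_CLK() */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        clk_enable(clk);        /* on PPMCR-capable parts this writes the
                                   clock's slot number to MCFPM_PPMCR0/1 */
        dev_info(dev, "uart clock at %lu Hz\n", clk_get_rate(clk));
        return 0;
}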
index 0864b83..b0d1641 100644 (file)
@@ -21,7 +21,7 @@ static void intc2_irq_gpio_mask(struct irq_data *d)
 {
        u32 imr;
        imr = readl(MCFSIM2_GPIOINTENABLE);
-       imr &= ~(0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+       imr &= ~(0x1 << (d->irq - MCF_IRQ_GPIO0));
        writel(imr, MCFSIM2_GPIOINTENABLE);
 }
 
@@ -29,13 +29,13 @@ static void intc2_irq_gpio_unmask(struct irq_data *d)
 {
        u32 imr;
        imr = readl(MCFSIM2_GPIOINTENABLE);
-       imr |= (0x1 << (d->irq - MCFINTC2_GPIOIRQ0));
+       imr |= (0x1 << (d->irq - MCF_IRQ_GPIO0));
        writel(imr, MCFSIM2_GPIOINTENABLE);
 }
 
 static void intc2_irq_gpio_ack(struct irq_data *d)
 {
-       writel(0x1 << (d->irq - MCFINTC2_GPIOIRQ0), MCFSIM2_GPIOINTCLEAR);
+       writel(0x1 << (d->irq - MCF_IRQ_GPIO0), MCFSIM2_GPIOINTCLEAR);
 }
 
 static struct irq_chip intc2_irq_gpio_chip = {
@@ -50,7 +50,7 @@ static int __init mcf_intc2_init(void)
        int irq;
 
        /* GPIO interrupt sources */
-       for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
+       for (irq = MCF_IRQ_GPIO0; (irq <= MCF_IRQ_GPIO7); irq++) {
                irq_set_chip(irq, &intc2_irq_gpio_chip);
                irq_set_handler(irq, handle_edge_irq);
        }
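With the renumbering onto MCF_IRQ_GPIO0..MCF_IRQ_GPIO7 and the edge-triggered setup above, a board driver claims one of these lines through the normal IRQ API; the m5249 SMC91x resource further down uses MCF_IRQ_GPIO6 exactly this way. A sketch with a hypothetical handler:

/* Sketch: claiming one of the remapped GPIO interrupts (hypothetical). */
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
        /* intc2_irq_gpio_ack() has already cleared the latch by now */
        return IRQ_HANDLED;
}

static int __init example_init(void)
{
        return request_irq(MCF_IRQ_GPIO6, example_isr, 0, "example", NULL);
}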
index 6bfbeeb..0e55f44 100644 (file)
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       NULL
+};
 
 /***************************************************************************/
 
index ff37fe9..2b10e9f 100644 (file)
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcfpit0,
+       &clk_mcfpit1,
+       &clk_mcfpit2,
+       &clk_mcfpit3,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       &clk_mcfuart2,
+       &clk_fec0,
+       NULL
+};
 
 /***************************************************************************/
 
index 23b19cb..c80b5e5 100644 (file)
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       NULL
+};
 
 /***************************************************************************/
 
@@ -28,8 +48,8 @@ static struct resource m5249_smc91x_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        {
-               .start          = MCFINTC2_GPIOIRQ6,
-               .end            = MCFINTC2_GPIOIRQ6,
+               .start          = MCF_IRQ_GPIO6,
+               .end            = MCF_IRQ_GPIO6,
                .flags          = IORESOURCE_IRQ,
        },
 };
@@ -75,8 +95,8 @@ static void __init m5249_smc91x_init(void)
        gpio = readl(MCFSIM2_GPIOINTENABLE);
        writel(gpio | 0x40, MCFSIM2_GPIOINTENABLE);
 
-       gpio = readl(MCFSIM2_INTLEVEL5);
-       writel(gpio | 0x04000000, MCFSIM2_INTLEVEL5);
+       gpio = readl(MCFINTC2_INTPRI5);
+       writel(gpio | 0x04000000, MCFINTC2_INTPRI5);
 }
 
 #endif /* CONFIG_M5249C3 */
index fce8f8a..5b9f657 100644 (file)
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       NULL
+};
 
 /***************************************************************************/
 
index 45b246d..a8c5856 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
 
 /***************************************************************************/
 
@@ -30,6 +31,31 @@ unsigned char ledbank = 0xff;
 
 /***************************************************************************/
 
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcftmr2, "mcftmr.2", MCF_BUSCLK);
+DEFINE_CLK(mcftmr3, "mcftmr.3", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcftmr2,
+       &clk_mcftmr3,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       &clk_fec0,
+       NULL
+};
+
+/***************************************************************************/
+
 static void __init m5272_uarts_init(void)
 {
        u32 v;
index 1431ba0..6fbfe90 100644 (file)
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcfpit0,
+       &clk_mcfpit1,
+       &clk_mcfpit2,
+       &clk_mcfpit3,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       &clk_mcfuart2,
+       &clk_fec0,
+       &clk_fec1,
+       NULL
+};
 
 /***************************************************************************/
 
index f9f7e6a..83b7dad 100644 (file)
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
+DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
+DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
+DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcfpit0,
+       &clk_mcfpit1,
+       &clk_mcfpit2,
+       &clk_mcfpit3,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       &clk_mcfuart2,
+       &clk_fec0,
+       NULL
+};
 
 /***************************************************************************/
 
index a568d28..8874353 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #include <asm/mcfwdebug.h>
+#include <asm/mcfclk.h>
 
 /***************************************************************************/
 
@@ -28,6 +29,25 @@ unsigned char ledbank = 0xff;
 
 /***************************************************************************/
 
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       NULL
+};
+
+/***************************************************************************/
+
 void __init config_BSP(char *commandp, int size)
 {
 #if defined(CONFIG_NETtel) || \
index bb6c746..2fb3cdb 100644 (file)
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
+
+/***************************************************************************/
+
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
+DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcftmr0,
+       &clk_mcftmr1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       NULL
+};
 
 /***************************************************************************/
 
index b587bf3..952da53 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mm.h>
+#include <linux/clk.h>
 #include <linux/bootmem.h>
 #include <asm/pgalloc.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/m54xxsim.h>
 #include <asm/mcfuart.h>
+#include <asm/mcfclk.h>
 #include <asm/m54xxgpt.h>
 #ifdef CONFIG_MMU
 #include <asm/mmu_context.h>
 #endif
 
 /***************************************************************************/
 
+DEFINE_CLK(pll, "pll.0", MCF_CLK);
+DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt0, "mcfslt.0", MCF_BUSCLK);
+DEFINE_CLK(mcfslt1, "mcfslt.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
+DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
+DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
+DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+       &clk_pll,
+       &clk_sys,
+       &clk_mcfslt0,
+       &clk_mcfslt1,
+       &clk_mcfuart0,
+       &clk_mcfuart1,
+       &clk_mcfuart2,
+       &clk_mcfuart3,
+       NULL
+};
+
+/***************************************************************************/
+
 static void __init m54xx_uarts_init(void)
 {
        /* enable io pins */
index cb8f992..0f7c852 100644 (file)
@@ -111,6 +111,7 @@ config VSYSCALL
 config NUMA
        bool "Non Uniform Memory Access (NUMA) Support"
        depends on MMU && SYS_SUPPORTS_NUMA && EXPERIMENTAL
+       select ARCH_WANT_NUMA_VARIABLE_LOCALITY
        default n
        help
          Some SH systems have many various memories scattered around
index 65a872b..97f8c5a 100644 (file)
@@ -22,6 +22,8 @@ config X86
        def_bool y
        select HAVE_AOUT if X86_32
        select HAVE_UNSTABLE_SCHED_CLOCK
+       select ARCH_SUPPORTS_NUMA_BALANCING
+       select ARCH_WANTS_PROT_NUMA_PROT_NONE
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_PCSPKR_PLATFORM
index fd13815..6e8fdf5 100644 (file)
@@ -69,37 +69,23 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
        efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3),         \
                  (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern unsigned long efi_call_virt_prelog(void);
-extern void efi_call_virt_epilog(unsigned long);
-
-#define efi_callx(x, func, ...)                                        \
-       ({                                                      \
-               efi_status_t __status;                          \
-               unsigned long __pgd;                            \
-                                                               \
-               __pgd = efi_call_virt_prelog();                 \
-               __status = efi_call##x(func, __VA_ARGS__);      \
-               efi_call_virt_epilog(__pgd);                    \
-               __status;                                       \
-       })
-
 #define efi_call_virt0(f)                              \
-       efi_callx(0, (void *)(efi.systab->runtime->f))
+       efi_call0((void *)(efi.systab->runtime->f))
 #define efi_call_virt1(f, a1)                                  \
-       efi_callx(1, (void *)(efi.systab->runtime->f), (u64)(a1))
+       efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
 #define efi_call_virt2(f, a1, a2)                                      \
-       efi_callx(2, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
+       efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
 #define efi_call_virt3(f, a1, a2, a3)                                  \
-       efi_callx(3, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+       efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3))
 #define efi_call_virt4(f, a1, a2, a3, a4)                              \
-       efi_callx(4, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+       efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4))
 #define efi_call_virt5(f, a1, a2, a3, a4, a5)                          \
-       efi_callx(5, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+       efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5))
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)                      \
-       efi_callx(6, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+       efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
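After this revert each efi_call_virtN macro is again a plain cast-and-call. As a worked expansion (hypothetical call site; get_next_high_mono_count is one of the standard runtime services):

/*
 * Worked expansion of the reverted macros (hypothetical call site):
 *
 *      status = efi_call_virt1(get_next_high_mono_count, &count);
 *
 * now becomes the direct indirect call
 *
 *      status = efi_call1((void *)(efi.systab->runtime->get_next_high_mono_count),
 *                         (u64)(&count));
 *
 * with no CR3 switch (the removed efi_call_virt_prelog/epilog) around it.
 */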
index a1f780d..5199db2 100644 (file)
@@ -404,7 +404,14 @@ static inline int pte_same(pte_t a, pte_t b)
 
 static inline int pte_present(pte_t a)
 {
-       return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+       return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+                              _PAGE_NUMA);
+}
+
+#define pte_accessible pte_accessible
+static inline int pte_accessible(pte_t a)
+{
+       return pte_flags(a) & _PAGE_PRESENT;
 }
 
 static inline int pte_hidden(pte_t pte)
@@ -420,7 +427,8 @@ static inline int pmd_present(pmd_t pmd)
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
-       return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+       return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
+                                _PAGE_NUMA);
 }
 
 static inline int pmd_none(pmd_t pmd)
@@ -479,6 +487,11 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 
 static inline int pmd_bad(pmd_t pmd)
 {
+#ifdef CONFIG_NUMA_BALANCING
+       /* pmd_numa check */
+       if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
+               return 0;
+#endif
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
 }
 
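pte_accessible() lets generic code distinguish "visible to the hardware walker" from the broader pte_present(), which now also covers _PAGE_NUMA/_PAGE_PROTNONE entries that can never sit in the TLB. A simplified sketch of the intended caller pattern, not the exact mainline code:

/* Sketch: a TLB flush is only needed for ptes the hardware could cache. */
static inline void flush_if_accessible(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t old)
{
        if (pte_accessible(old))
                flush_tlb_page(vma, addr);
        /* NUMA-hinting and protnone ptes were never loaded into the TLB */
}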
index ec8a1fc..3c32db8 100644 (file)
 #define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
+/*
+ * _PAGE_NUMA indicates that this page will trigger a numa hinting
+ * minor page fault to gather numa placement statistics (see
+ * pte_numa()). The bit picked (8) is within the range between
+ * _PAGE_FILE (6) and _PAGE_PROTNONE (8) bits. Therefore, it doesn't
+ * require changes to the swp entry format because that bit is always
+ * zero when the pte is not present.
+ *
+ * The bit picked must always be zero both when the pmd is present and
+ * when it is not present, so that we don't lose information when we set
+ * it while atomically clearing the present bit.
+ *
+ * Because we shared the same bit (8) with _PAGE_PROTNONE this can be
+ * interpreted as _PAGE_NUMA only in places that _PAGE_PROTNONE
+ * couldn't reach, like handle_mm_fault() (see access_error in
+ * arch/x86/mm/fault.c, the vma protection must not be PROT_NONE for
+ * handle_mm_fault() to be invoked).
+ */
+#define _PAGE_NUMA     _PAGE_PROTNONE
+
 #define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
                         _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
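The disambiguation rule in the comment (bit 8 means a NUMA-hinting entry only while the present bit is clear at the same time) reduces to a two-bit test. A sketch of the corresponding helper; the generic definition added elsewhere in this series has this shape:

/* Sketch: _PAGE_NUMA set with _PAGE_PRESENT clear marks a NUMA-hinting
 * pte; both bits set is an ordinary present (or protnone) mapping. */
static inline int pte_numa(pte_t pte)
{
        return (pte_flags(pte) & (_PAGE_NUMA | _PAGE_PRESENT)) == _PAGE_NUMA;
}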
index d4f460f..f84fe00 100644 (file)
@@ -103,13 +103,71 @@ void __init tboot_probe(void)
        pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
 }
 
+static pgd_t *tboot_pg_dir;
+static struct mm_struct tboot_mm = {
+       .mm_rb          = RB_ROOT,
+       .pgd            = swapper_pg_dir,
+       .mm_users       = ATOMIC_INIT(2),
+       .mm_count       = ATOMIC_INIT(1),
+       .mmap_sem       = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+       .page_table_lock =  __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
+       .mmlist         = LIST_HEAD_INIT(init_mm.mmlist),
+};
+
 static inline void switch_to_tboot_pt(void)
 {
-#ifdef CONFIG_X86_32
-       load_cr3(initial_page_table);
-#else
-       write_cr3(real_mode_header->trampoline_pgd);
-#endif
+       write_cr3(virt_to_phys(tboot_pg_dir));
+}
+
+static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
+                         pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pgd = pgd_offset(&tboot_mm, vaddr);
+       pud = pud_alloc(&tboot_mm, pgd, vaddr);
+       if (!pud)
+               return -1;
+       pmd = pmd_alloc(&tboot_mm, pud, vaddr);
+       if (!pmd)
+               return -1;
+       pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
+       if (!pte)
+               return -1;
+       set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
+       pte_unmap(pte);
+       return 0;
+}
+
+static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
+                          unsigned long nr)
+{
+       /* Reuse the original kernel mapping */
+       tboot_pg_dir = pgd_alloc(&tboot_mm);
+       if (!tboot_pg_dir)
+               return -1;
+
+       for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
+               if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
+                       return -1;
+       }
+
+       return 0;
+}
+
+static void tboot_create_trampoline(void)
+{
+       u32 map_base, map_size;
+
+       /* Create identity map for tboot shutdown code. */
+       map_base = PFN_DOWN(tboot->tboot_base);
+       map_size = PFN_UP(tboot->tboot_size);
+       if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
+               panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
+                     map_base, map_size);
 }
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -167,6 +225,14 @@ void tboot_shutdown(u32 shutdown_type)
        if (!tboot_enabled())
                return;
 
+       /*
+        * if we're being called before the 1:1 mapping is set up then just
+        * return and let the normal shutdown happen; this should only be
+        * due to very early panic()
+        */
+       if (!tboot_pg_dir)
+               return;
+
        /* if this is S3 then set regions to MAC */
        if (shutdown_type == TB_SHUTDOWN_S3)
                if (tboot_setup_sleep())
@@ -277,6 +343,8 @@ static __init int tboot_late_init(void)
        if (!tboot_enabled())
                return 0;
 
+       tboot_create_trampoline();
+
        atomic_set(&ap_wfs_count, 0);
        register_hotcpu_notifier(&tboot_cpu_notifier);
 
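The trampoline maps the tboot region 1:1, page by page, so the virtual address handed to map_tboot_page() equals pfn << PAGE_SHIFT. A small worked example with hypothetical numbers:

/*
 * Worked example of the identity-map arithmetic (hypothetical values):
 * with tboot->tboot_base = 0x80000 and tboot->tboot_size = 0x3000,
 *      map_base = PFN_DOWN(0x80000) = 0x80
 *      map_size = PFN_UP(0x3000)    = 3
 * so pages are mapped at vaddr = paddr = 0x80000, 0x81000, 0x82000.
 */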
index 3a3e8c9..9a907a6 100644 (file)
@@ -145,19 +145,6 @@ static int addr_to_vsyscall_nr(unsigned long addr)
        return nr;
 }
 
-#ifdef CONFIG_SECCOMP
-static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
-{
-       if (!seccomp_mode(&tsk->seccomp))
-               return 0;
-       task_pt_regs(tsk)->orig_ax = syscall_nr;
-       task_pt_regs(tsk)->ax = syscall_nr;
-       return __secure_computing(syscall_nr);
-}
-#else
-#define vsyscall_seccomp(_tsk, _nr) 0
-#endif
-
 static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
@@ -190,10 +177,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
        struct task_struct *tsk;
        unsigned long caller;
-       int vsyscall_nr;
+       int vsyscall_nr, syscall_nr, tmp;
        int prev_sig_on_uaccess_error;
        long ret;
-       int skip;
 
        /*
         * No point in checking CS -- the only way to get here is a user mode
@@ -225,56 +211,84 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        }
 
        tsk = current;
-       /*
-        * With a real vsyscall, page faults cause SIGSEGV.  We want to
-        * preserve that behavior to make writing exploits harder.
-        */
-       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
-       current_thread_info()->sig_on_uaccess_error = 1;
 
        /*
+        * Check for access_ok violations and find the syscall nr.
+        *
         * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
         * 64-bit, so we don't need to special-case it here.  For all the
         * vsyscalls, NULL means "don't write anything" not "write it at
         * address 0".
         */
-       ret = -EFAULT;
-       skip = 0;
        switch (vsyscall_nr) {
        case 0:
-               skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
-               if (skip)
-                       break;
-
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
-                   !write_ok_or_segv(regs->si, sizeof(struct timezone)))
-                       break;
+                   !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_gettimeofday;
+               break;
+
+       case 1:
+               if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_time;
+               break;
+
+       case 2:
+               if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+                   !write_ok_or_segv(regs->si, sizeof(unsigned))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_getcpu;
+               break;
+       }
+
+       /*
+        * Handle seccomp.  regs->ip must be the original value.
+        * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+        *
+        * We could optimize the seccomp disabled case, but performance
+        * here doesn't matter.
+        */
+       regs->orig_ax = syscall_nr;
+       regs->ax = -ENOSYS;
+       tmp = secure_computing(syscall_nr);
+       if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
+               warn_bad_vsyscall(KERN_DEBUG, regs,
+                                 "seccomp tried to change syscall nr or ip");
+               do_exit(SIGSYS);
+       }
+       if (tmp)
+               goto do_ret;  /* skip requested */
 
+       /*
+        * With a real vsyscall, page faults cause SIGSEGV.  We want to
+        * preserve that behavior to make writing exploits harder.
+        */
+       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+       current_thread_info()->sig_on_uaccess_error = 1;
+
+       ret = -EFAULT;
+       switch (vsyscall_nr) {
+       case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;
 
        case 1:
-               skip = vsyscall_seccomp(tsk, __NR_time);
-               if (skip)
-                       break;
-
-               if (!write_ok_or_segv(regs->di, sizeof(time_t)))
-                       break;
-
                ret = sys_time((time_t __user *)regs->di);
                break;
 
        case 2:
-               skip = vsyscall_seccomp(tsk, __NR_getcpu);
-               if (skip)
-                       break;
-
-               if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
-                   !write_ok_or_segv(regs->si, sizeof(unsigned)))
-                       break;
-
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 NULL);
@@ -283,12 +297,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
-       if (skip) {
-               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
-                       goto do_ret;
-               goto done; /* seccomp trace/trap */
-       }
-
+check_fault:
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
@@ -311,7 +320,6 @@ do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
-done:
        return true;
 
 sigsegv:
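This path only runs when userspace jumps into the legacy vsyscall page, so it can be exercised from a small test program by calling the fixed gettimeofday entry directly (0xffffffffff600000 in the x86-64 ABI; requires vsyscall emulation to be enabled):

/* Userspace demo (assumes x86-64 with vsyscall=emulate): calling the
 * legacy vsyscall gettimeofday entry lands in emulate_vsyscall(). */
#include <stdio.h>
#include <sys/time.h>

typedef int (*vgtod_t)(struct timeval *, struct timezone *);

int main(void)
{
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;  /* vsyscall nr 0 */
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)
                printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
        return 0;
}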
index 931930a..a718e0d 100644 (file)
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
        /*
         * On success we use clflush, when the CPU supports it to
-        * avoid the wbindv. If the CPU does not support it, in the
-        * error case, and during early boot (for EFI) we fall back
-        * to cpa_flush_all (which uses wbinvd):
+        * avoid the wbinvd. If the CPU does not support it and in the
+        * error case we fall back to cpa_flush_all (which uses
+        * wbinvd):
         */
-       if (early_boot_irqs_disabled)
-               __cpa_flush_all((void *)(long)cache);
-       else if (!ret && cpu_has_clflush) {
+       if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
index 217eb70..e27fbf8 100644 (file)
@@ -301,6 +301,13 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
        free_page((unsigned long)pgd);
 }
 
+/*
+ * Used to set accessed or dirty bits in the page table entries
+ * on other architectures. On x86, the accessed and dirty bits
+ * are tracked by hardware. However, do_wp_page calls this function
+ * to also make the pte writeable at the same time the dirty bit is
+ * set. In that case we do actually need to write the PTE.
+ */
 int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
@@ -310,7 +317,6 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
-               flush_tlb_page(vma, address);
        }
 
        return changed;
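The new comment is easiest to read alongside its one interesting caller: do_wp_page()-style code makes the pte writable and dirty in the same step, so the pte contents really do change even though the hardware tracks the accessed/dirty bits itself. A simplified caller sketch:

/* Simplified sketch of the do_wp_page()-style caller described above. */
static void make_pte_writable(struct vm_area_struct *vma, unsigned long address,
                              pte_t *page_table, pte_t orig_pte)
{
        pte_t entry = pte_mkyoung(orig_pte);

        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        /* dirty == 1: the pte is being rewritten, so it must be stored */
        if (ptep_set_access_flags(vma, address, page_table, entry, 1))
                update_mmu_cache(vma, address, page_table);
}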
index 0a34d9e..ad44391 100644 (file)
@@ -239,7 +239,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
        return status;
 }
 
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+                                            efi_time_cap_t *tc)
+{
+       unsigned long flags;
+       efi_status_t status;
+
+       spin_lock_irqsave(&rtc_lock, flags);
+       efi_call_phys_prelog();
+       status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+                               virt_to_phys(tc));
+       efi_call_phys_epilog();
+       spin_unlock_irqrestore(&rtc_lock, flags);
+       return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
 {
        int real_seconds, real_minutes;
        efi_status_t    status;
@@ -268,7 +283,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
        return 0;
 }
 
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
 {
        efi_status_t status;
        efi_time_t eft;
@@ -624,13 +639,18 @@ static int __init efi_runtime_init(void)
        }
        /*
         * We will only need *early* access to the following
-        * EFI runtime service before set_virtual_address_map
+        * two EFI runtime services before set_virtual_address_map
         * is invoked.
         */
+       efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
        efi_phys.set_virtual_address_map =
                (efi_set_virtual_address_map_t *)
                runtime->set_virtual_address_map;
-
+       /*
+        * Make efi_get_time callable before entering
+        * virtual mode.
+        */
+       efi.get_time = phys_efi_get_time;
        early_iounmap(runtime, sizeof(efi_runtime_services_t));
 
        return 0;
@@ -716,10 +736,12 @@ void __init efi_init(void)
                efi_enabled = 0;
                return;
        }
+#ifdef CONFIG_X86_32
        if (efi_is_native()) {
                x86_platform.get_wallclock = efi_get_time;
                x86_platform.set_wallclock = efi_set_rtc_mmss;
        }
+#endif
 
 #if EFI_DEBUG
        print_efi_memmap();
index 06c8b2e..95fd505 100644 (file)
@@ -58,21 +58,6 @@ static void __init early_code_mapping_set_exec(int executable)
        }
 }
 
-unsigned long efi_call_virt_prelog(void)
-{
-       unsigned long saved;
-
-       saved = read_cr3();
-       write_cr3(real_mode_header->trampoline_pgd);
-
-       return saved;
-}
-
-void efi_call_virt_epilog(unsigned long saved)
-{
-       write_cr3(saved);
-}
-
 void __init efi_call_phys_prelog(void)
 {
        unsigned long vaddress;
index 3f6d39d..b8858fb 100644 (file)
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+               return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
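The fix relies on the dead/dying split introduced in this series: "dying" means blk_cleanup_queue() has begun and no new requests may enter, while "dead" means cleanup has finished. The predicates are assumed to be simple queue-flag tests of this shape:

/* Assumed shape of the predicates after the dead/dying split. */
#define blk_queue_dying(q)      test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)       test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)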
index 3c95c4d..c973249 100644 (file)
@@ -40,6 +40,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
 
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       queue_delayed_work(kblockd_workqueue, &q->delay_work,
-                               msecs_to_jiffies(msecs));
+       if (likely(!blk_queue_dead(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                                  msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -293,6 +295,34 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+       if (unlikely(blk_queue_dead(q)))
+               return;
+
+       /*
+        * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+        * the queue lock internally. As a result multiple threads may be
+        * running such a request function concurrently. Keep track of the
+        * number of active request_fn invocations such that blk_drain_queue()
+        * can wait until all these request_fn calls have finished.
+        */
+       q->request_fn_active++;
+       q->request_fn(q);
+       q->request_fn_active--;
+}
+
+/**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       q->request_fn(q);
+       __blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q)))
+       if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
@@ -349,7 +379,7 @@ void blk_put_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_put_queue);
 
 /**
- * blk_drain_queue - drain requests from request_queue
+ * __blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
  * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
@@ -357,15 +387,17 @@ EXPORT_SYMBOL(blk_put_queue);
  * If not, only ELVPRIV requests are drained.  The caller is responsible
  * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q, bool drain_all)
+static void __blk_drain_queue(struct request_queue *q, bool drain_all)
+       __releases(q->queue_lock)
+       __acquires(q->queue_lock)
 {
        int i;
 
+       lockdep_assert_held(q->queue_lock);
+
        while (true) {
                bool drain = false;
 
-               spin_lock_irq(q->queue_lock);
-
                /*
                 * The caller might be trying to drain @q before its
                 * elevator is initialized.
@@ -386,6 +418,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        __blk_run_queue(q);
 
                drain |= q->nr_rqs_elvpriv;
+               drain |= q->request_fn_active;
 
                /*
                 * Unfortunately, requests are queued at and tracked from
@@ -401,11 +434,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                        }
                }
 
-               spin_unlock_irq(q->queue_lock);
-
                if (!drain)
                        break;
+
+               spin_unlock_irq(q->queue_lock);
+
                msleep(10);
+
+               spin_lock_irq(q->queue_lock);
        }
 
        /*
@@ -416,13 +452,9 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
        if (q->request_fn) {
                struct request_list *rl;
 
-               spin_lock_irq(q->queue_lock);
-
                blk_queue_for_each_rl(rl, q)
                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
                                wake_up_all(&rl->wait[i]);
-
-               spin_unlock_irq(q->queue_lock);
        }
 }
 
@@ -446,7 +478,10 @@ void blk_queue_bypass_start(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (drain) {
-               blk_drain_queue(q, false);
+               spin_lock_irq(q->queue_lock);
+               __blk_drain_queue(q, false);
+               spin_unlock_irq(q->queue_lock);
+
                /* ensure blk_queue_bypass() is %true inside RCU read lock */
                synchronize_rcu();
        }
@@ -473,20 +508,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it.  All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it.  All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
        spinlock_t *lock = q->queue_lock;
 
-       /* mark @q DEAD, no new request or merges will be allowed afterwards */
+       /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
        spin_lock_irq(lock);
 
        /*
-        * Dead queue is permanently in bypass mode till released.  Note
+        * A dying queue is permanently in bypass mode till released.  Note
         * that, unlike blk_queue_bypass_start(), we aren't performing
         * synchronize_rcu() after entering bypass mode to avoid the delay
         * as some drivers create and destroy a lot of queues while
@@ -499,12 +534,18 @@ void blk_cleanup_queue(struct request_queue *q)
 
        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-       queue_flag_set(QUEUE_FLAG_DEAD, q);
+       queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
-       /* drain all requests queued before DEAD marking */
-       blk_drain_queue(q, true);
+       /*
+        * Drain all requests queued before DYING marking. Set the DEAD flag
+        * to prevent q->request_fn() from being invoked once draining finishes.
+        */
+       spin_lock_irq(lock);
+       __blk_drain_queue(q, true);
+       queue_flag_set(QUEUE_FLAG_DEAD, q);
+       spin_unlock_irq(lock);
 
        /* @q won't process any more requests; flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
@@ -549,7 +590,7 @@ void blk_exit_rl(struct request_list *rl)
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-       return blk_alloc_queue_node(gfp_mask, -1);
+       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -660,7 +701,7 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
 
 struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 {
-       return blk_init_queue_node(rfn, lock, -1);
+       return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(blk_init_queue);
 
@@ -716,7 +757,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-       if (likely(!blk_queue_dead(q))) {
+       if (likely(!blk_queue_dying(q))) {
                __blk_get_queue(q);
                return true;
        }
@@ -870,7 +911,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(blk_queue_dead(q)))
+       if (unlikely(blk_queue_dying(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1091,7 @@ retry:
        if (rq)
                return rq;
 
-       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+       if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                blk_put_rl(rl);
                return NULL;
        }
@@ -1910,7 +1951,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                return -EIO;
 
        spin_lock_irqsave(q->queue_lock, flags);
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
                return -ENODEV;
        }
@@ -2884,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
        trace_block_unplug(q, depth, !from_schedule);
 
-       /*
-        * Don't mess with dead queue.
-        */
-       if (unlikely(blk_queue_dead(q))) {
-               spin_unlock(q->queue_lock);
-               return;
-       }
-
-       /*
-        * If we are punting this to kblockd, then we can safely drop
-        * the queue_lock before waking kblockd (which needs to take
-        * this lock).
-        */
-       if (from_schedule) {
-               spin_unlock(q->queue_lock);
+       if (from_schedule)
                blk_run_queue_async(q);
-       } else {
+       else
                __blk_run_queue(q);
-               spin_unlock(q->queue_lock);
-       }
-
+       spin_unlock(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -2996,7 +3021,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                /*
                 * Short-circuit if @q is dead
                 */
-               if (unlikely(blk_queue_dead(q))) {
+               if (unlikely(blk_queue_dying(q))) {
                        __blk_end_request_all(rq, -ENODEV);
                        continue;
                }
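
A toy single-threaded model of why this series adds request_fn_active and
splits DYING from DEAD (hypothetical userspace code; the real locking is
omitted): request_fn may drop the queue lock, so draining must wait for
in-flight request_fn calls as well as for pending elvpriv requests, and only
after that may the queue be marked DEAD.

    #include <stdbool.h>
    #include <stdio.h>

    struct queue {
            int  request_fn_active;
            int  nr_rqs_elvpriv;
            bool dead;
    };

    static void request_fn(struct queue *q)
    {
            /* may unlock/relock the queue lock internally (e.g. SCSI) */
            if (q->nr_rqs_elvpriv > 0)
                    q->nr_rqs_elvpriv--;
    }

    static void run_queue_uncond(struct queue *q)
    {
            if (q->dead)
                    return;                 /* DEAD: never enter request_fn */
            q->request_fn_active++;         /* visible to the drain loop */
            request_fn(q);
            q->request_fn_active--;
    }

    int main(void)
    {
            struct queue q = { .nr_rqs_elvpriv = 2 };

            /* the drain condition from __blk_drain_queue() */
            while (q.nr_rqs_elvpriv || q.request_fn_active)
                    run_queue_uncond(&q);
            q.dead = true;                  /* QUEUE_FLAG_DEAD after drain */
            run_queue_uncond(&q);           /* now a guaranteed no-op */
            printf("drained, active=%d\n", q.request_fn_active);
            return 0;
    }
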
index f71eac3..74638ec 100644
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(blk_queue_dead(q))) {
+       if (unlikely(blk_queue_dying(q))) {
                rq->errors = -ENXIO;
                if (rq->end_io)
                        rq->end_io(rq, rq->errors);
@@ -78,7 +78,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
        if (is_pm_resume)
-               q->request_fn(q);
+               __blk_run_queue_uncond(q);
        spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
index 9373b58..b3a1f2b 100644
@@ -43,11 +43,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
-       unsigned int max_discard_sectors;
-       unsigned int granularity, alignment, mask;
+       sector_t max_discard_sectors;
+       sector_t granularity, alignment;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;
+       struct blk_plug plug;
 
        if (!q)
                return -ENXIO;
@@ -57,15 +58,16 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
-       mask = granularity - 1;
-       alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+       alignment = bdev_discard_alignment(bdev) >> 9;
+       alignment = sector_div(alignment, granularity);
 
        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-       max_discard_sectors = round_down(max_discard_sectors, granularity);
+       sector_div(max_discard_sectors, granularity);
+       max_discard_sectors *= granularity;
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
@@ -81,9 +83,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;
 
+       blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int req_sects;
-               sector_t end_sect;
+               sector_t end_sect, tmp;
 
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
@@ -98,10 +101,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
-               if (req_sects < nr_sects && (end_sect & mask) != alignment) {
-                       end_sect =
-                               round_down(end_sect - alignment, granularity)
-                               + alignment;
+               tmp = end_sect;
+               if (req_sects < nr_sects &&
+                   sector_div(tmp, granularity) != alignment) {
+                       end_sect = end_sect - alignment;
+                       sector_div(end_sect, granularity);
+                       end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }
 
@@ -117,6 +122,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }
+       blk_finish_plug(&plug);
 
        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
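
A standalone demo of the end-alignment arithmetic above, with made-up values
and sector_div() modeled as plain 64-bit divide/modulo (the patch uses
sector_div() because sector_t division must go through do_div() on 32-bit
builds):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t granularity = 3;   /* sectors; not a power of two */
            uint64_t alignment   = 1;   /* offset within a granule */
            uint64_t sector = 10, nr_sects = 100, req_sects = 17;
            uint64_t end_sect = sector + req_sects;

            /* If the proposed end is misaligned, pull it back to the
             * previous boundary congruent to 'alignment'. */
            if (req_sects < nr_sects && end_sect % granularity != alignment) {
                    end_sect = (end_sect - alignment) / granularity
                                    * granularity + alignment;
                    req_sects = end_sect - sector;
            }
            /* prints end_sect=25 req_sects=15; 25 % 3 == 1 == alignment */
            printf("end_sect=%llu req_sects=%llu\n",
                   (unsigned long long)end_sect,
                   (unsigned long long)req_sects);
            return 0;
    }
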
index 779bb76..c50ecf0 100644
@@ -611,7 +611,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                        bottom = b->discard_granularity + alignment;
 
                        /* Verify that top and bottom intervals line up */
-                       if (max(top, bottom) & (min(top, bottom) - 1))
+                       if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }
 
@@ -619,8 +619,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
-               t->discard_alignment = lcm(t->discard_alignment, alignment) &
-                       (t->discard_granularity - 1);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) %
+                       t->discard_granularity;
        }
 
        return ret;
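
A quick standalone check, with hypothetical numbers, of why the modulo form
is needed here: the old bitmask test is only valid when the smaller
granularity is a power of two.

    #include <stdio.h>

    int main(void)
    {
            unsigned int top = 30, bottom = 6; /* 6 is not a power of two */

            /* old test: assumes 'bottom' is a power of two */
            unsigned int mask_says = top & (bottom - 1);  /* 30 & 5 = 4 */
            /* new test: correct for any granularity */
            unsigned int mod_says  = top % bottom;        /* 30 % 6 = 0 */

            printf("mask: %s, modulo: %s\n",
                   mask_says ? "misaligned" : "aligned",
                   mod_says  ? "misaligned" : "aligned");
            return 0;
    }

The mask test flags 30/6 as misaligned even though the intervals line up
exactly; the modulo test gets it right.
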
index ce62046..7881477 100644
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
-       if (blk_queue_dead(q)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
index a9664fa..3114622 100644
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
                /* if %NULL and @q is alive, fall back to root_tg */
                if (!IS_ERR(blkg))
                        tg = blkg_to_tg(blkg);
-               else if (!blk_queue_dead(q))
+               else if (!blk_queue_dying(q))
                        tg = td_root_tg(td);
        }
 
index ca51543..47fdfdd 100644
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
-               if (unlikely(blk_queue_dead(q)) ||
+               if (unlikely(blk_queue_dying(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
@@ -145,6 +145,8 @@ int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
+void __blk_run_queue_uncond(struct request_queue *q);
+
 int blk_dev_init(void);
 
 
index deee61f..650f427 100644
@@ -151,19 +151,6 @@ failjob_rls_job:
        return -ENOMEM;
 }
 
-/*
- * bsg_goose_queue - restart queue in case it was stopped
- * @q: request q to be restarted
- */
-void bsg_goose_queue(struct request_queue *q)
-{
-       if (!q)
-               return;
-
-       blk_run_queue_async(q);
-}
-EXPORT_SYMBOL_GPL(bsg_goose_queue);
-
 /**
  * bsg_request_fn - generic handler for bsg requests
  * @q: request queue to manage
index fb52df9..e62e920 100644
@@ -1973,7 +1973,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-           time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+           time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+           cfqq == RQ_CFQQ(next)) {
                list_move(&rq->queuelist, &next->queuelist);
                rq_set_fifo_time(rq, rq_fifo_time(next));
        }
index 599b12e..90037b5 100644
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
        /*
         * rq is expired!
         */
-       if (time_after(jiffies, rq_fifo_time(rq)))
+       if (time_after_eq(jiffies, rq_fifo_time(rq)))
                return 1;
 
        return 0;
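
The one-character change above fixes an off-by-one at the expiry boundary; a
standalone illustration, with the comparison macros copied in spirit from
include/linux/jiffies.h:

    #include <stdio.h>

    #define time_after(a, b)     ((long)((b) - (a)) < 0)
    #define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

    int main(void)
    {
            unsigned long jiffies = 1000, fifo_time = 1000;

            /* a request whose deadline equals the current tick: */
            printf("time_after:    expired=%d\n",
                   time_after(jiffies, fifo_time));     /* 0: not expired */
            printf("time_after_eq: expired=%d\n",
                   time_after_eq(jiffies, fifo_time));  /* 1: expired */
            return 0;
    }
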
index 9b1d42b..9edba1b 100644
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
                                     struct request *rq)
 {
        struct request *__rq;
+       bool ret;
 
        if (blk_queue_nomerges(q))
                return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
        if (blk_queue_noxmerges(q))
                return false;
 
+       ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
-       __rq = elv_rqhash_find(q, blk_rq_pos(rq));
-       if (__rq && blk_attempt_req_merge(q, __rq, rq))
-               return true;
+       while (1) {
+               __rq = elv_rqhash_find(q, blk_rq_pos(rq));
+               if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+                       break;
 
-       return false;
+               /* The merged request could be merged with others; try again */
+               ret = true;
+               rq = __rq;
+       }
+
+       return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
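
A standalone illustration of the cascading back-merge the new loop enables
(toy data; the kernel re-does the hash lookup that this sketch replaces with
a flat array): once rq merges with its back-merge candidate, the grown
request may immediately merge again.

    #include <stdio.h>

    struct req { long start, end; };  /* [start, end) in sectors */

    /* merge b into a if b ends exactly where a starts */
    static int attempt_merge(struct req *a, const struct req *b)
    {
            if (b->end != a->start)
                    return 0;
            a->start = b->start;
            return 1;
    }

    int main(void)
    {
            struct req hash[] = { { 8, 12 }, { 4, 8 } }; /* queued requests */
            struct req rq = { 12, 16 };                  /* newly inserted */
            int i;

            /* keep merging while a back-merge candidate exists */
            for (i = 0; i < 2 && attempt_merge(&rq, &hash[i]); i++)
                    ;
            printf("final request: [%ld, %ld)\n", rq.start, rq.end);
            return 0;
    }

With the old single-shot lookup this would stop at [8, 16); the loop carries
it through to [4, 16).
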
index 6cace66..2a6fdf5 100644
@@ -1245,7 +1245,7 @@ EXPORT_SYMBOL(blk_lookup_devt);
 
 struct gendisk *alloc_disk(int minors)
 {
-       return alloc_disk_node(minors, -1);
+       return alloc_disk_node(minors, NUMA_NO_NODE);
 }
 EXPORT_SYMBOL(alloc_disk);
 
index cb5f0a3..75a54e1 100644
@@ -234,8 +234,8 @@ config KARMA_PARTITION
          uses a proprietary partition table.
 
 config EFI_PARTITION
-       bool "EFI GUID Partition support"
-       depends on PARTITION_ADVANCED
+       bool "EFI GUID Partition support" if PARTITION_ADVANCED
+       default y
        select CRC32
        help
          Say Y here if you would like to use hard disks under Linux which
index bd5de08..0576a7d 100644
@@ -157,6 +157,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
 EXPORT_SYMBOL(tegra_ahb_enable_smmu);
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static int tegra_ahb_suspend(struct device *dev)
 {
        int i;
@@ -176,6 +177,7 @@ static int tegra_ahb_resume(struct device *dev)
                gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
        return 0;
 }
+#endif
 
 static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
                            tegra_ahb_suspend,
index bbec35d..0f51ed6 100644
@@ -6,6 +6,7 @@ menu "Bus devices"
 
 config OMAP_OCP2SCP
        tristate "OMAP OCP2SCP DRIVER"
+       depends on ARCH_OMAP2PLUS
        help
          Driver to enable ocp2scp module which transforms ocp interface
          protocol to scp protocol. In OMAP4, USB PHY is connected via
index 6ec0fff..1042c1b 100644
 #define I810_PTE_LOCAL         0x00000002
 #define I810_PTE_VALID         0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED      0x00000002
-#define HSW_PTE_UNCACHED       0x00000000
-#define GEN6_PTE_LLC           0x00000004
-#define GEN6_PTE_LLC_MLC       0x00000006
-#define GEN6_PTE_GFDT          0x00000008
 
 #define I810_SMRAM_MISCC       0x70
 #define I810_GFX_MEM_WIN_SIZE  0x00010000
@@ -97,7 +91,6 @@
 #define G4x_GMCH_SIZE_VT_2M    (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL          0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV      0x101008
 
 #define I810_DRAM_CTL          0x3000
 #define I810_DRAM_ROW_0                0x00000001
 #define INTEL_I7505_AGPCTRL    0x70
 #define INTEL_I7505_MCHCFG     0x50
 
-#define SNB_GMCH_CTRL  0x50
-#define SNB_GMCH_GMS_STOLEN_MASK       0xF8
-#define SNB_GMCH_GMS_STOLEN_32M                (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M                (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M                (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M       (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M       (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M       (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M       (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M       (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M       (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M       (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M       (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M       (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M       (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M       (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M       (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M       (0x10 << 3)
-#define SNB_GTT_SIZE_0M                        (0 << 8)
-#define SNB_GTT_SIZE_1M                        (1 << 8)
-#define SNB_GTT_SIZE_2M                        (2 << 8)
-#define SNB_GTT_SIZE_MASK              (3 << 8)
-
 /* pci devices ids */
 #define PCI_DEVICE_ID_INTEL_E7221_HB   0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG   0x258a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB         0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG          0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB             0x0100  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG         0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG         0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG    0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB           0x0104  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG       0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG       0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG  0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB           0x0108  /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG           0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB               0x0150  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG           0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG           0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB             0x0154  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG         0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG         0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB             0x0158  /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG         0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG         0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB              0x0F00 /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG              0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB                 0x0400 /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG           0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG           0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG      0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB               0x0404 /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG           0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG           0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG      0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB               0x0408 /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG           0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG           0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG      0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB               0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG       0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG       0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG  0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG       0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG       0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG  0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG       0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG       0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG  0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG       0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG       0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG  0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG       0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG       0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG  0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG       0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG       0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG  0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG       0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG       0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG  0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG       0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG       0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG  0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG       0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG       0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG  0x0D3A
 
 #endif
index 38390f7..dbd901e 100644
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
                        stolen_size = 0;
                        break;
                }
-       } else if (INTEL_GTT_GEN == 6) {
-               /*
-                * SandyBridge has new memory control reg at 0x50.w
-                */
-               u16 snb_gmch_ctl;
-               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-               switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-               case SNB_GMCH_GMS_STOLEN_32M:
-                       stolen_size = MB(32);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_64M:
-                       stolen_size = MB(64);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_96M:
-                       stolen_size = MB(96);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_128M:
-                       stolen_size = MB(128);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_160M:
-                       stolen_size = MB(160);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_192M:
-                       stolen_size = MB(192);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_224M:
-                       stolen_size = MB(224);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_256M:
-                       stolen_size = MB(256);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_288M:
-                       stolen_size = MB(288);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_320M:
-                       stolen_size = MB(320);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_352M:
-                       stolen_size = MB(352);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_384M:
-                       stolen_size = MB(384);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_416M:
-                       stolen_size = MB(416);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_448M:
-                       stolen_size = MB(448);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_480M:
-                       stolen_size = MB(480);
-                       break;
-               case SNB_GMCH_GMS_STOLEN_512M:
-                       stolen_size = MB(512);
-                       break;
-               }
        } else {
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
 
 static unsigned int intel_gtt_total_entries(void)
 {
-       int size;
-
        if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
                return i965_gtt_total_entries();
-       else if (INTEL_GTT_GEN == 6) {
-               u16 snb_gmch_ctl;
-
-               pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-               switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-               default:
-               case SNB_GTT_SIZE_0M:
-                       printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-                       size = MB(0);
-                       break;
-               case SNB_GTT_SIZE_1M:
-                       size = MB(1);
-                       break;
-               case SNB_GTT_SIZE_2M:
-                       size = MB(2);
-                       break;
-               }
-               return size/4;
-       } else {
+       else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
 {
        u8 __iomem *reg;
 
-       if (INTEL_GTT_GEN >= 6)
-           return true;
-
        if (INTEL_GTT_GEN == 2) {
                u16 gmch_ctrl;
 
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
        writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static bool gen6_check_flags(unsigned int flags)
-{
-       return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
-                               unsigned int flags)
-{
-       unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-       unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-       u32 pte_flags;
-
-       if (type_mask == AGP_USER_MEMORY)
-               pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
-       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-               if (gfdt)
-                       pte_flags |= GEN6_PTE_GFDT;
-       } else { /* set 'normal'/'cached' to LLC by default */
-               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-               if (gfdt)
-                       pte_flags |= GEN6_PTE_GFDT;
-       }
-
-       /* gen6 has bit11-4 for physical addr bit39-32 */
-       addr |= (addr >> 28) & 0xff0;
-       writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-                            unsigned int flags)
-{
-       unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-       unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-       u32 pte_flags;
-
-       if (type_mask == AGP_USER_MEMORY)
-               pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-       else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-               pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-               if (gfdt)
-                       pte_flags |= GEN6_PTE_GFDT;
-       } else { /* set 'normal'/'cached' to LLC by default */
-               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-               if (gfdt)
-                       pte_flags |= GEN6_PTE_GFDT;
-       }
-
-       /* gen6 has bit11-4 for physical addr bit39-32 */
-       addr |= (addr >> 28) & 0xff0;
-       writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
-                                  unsigned int flags)
-{
-       unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-       unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-       u32 pte_flags;
-
-       if (type_mask == AGP_USER_MEMORY)
-               pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-       else {
-               pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-               if (gfdt)
-                       pte_flags |= GEN6_PTE_GFDT;
-       }
-
-       /* gen6 has bit11-4 for physical addr bit39-32 */
-       addr |= (addr >> 28) & 0xff0;
-       writel(addr | pte_flags, intel_private.gtt + entry);
-
-       writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
 /* Certain Gen5 chipsets require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
 
 static int i9xx_setup(void)
 {
-       u32 reg_addr;
+       u32 reg_addr, gtt_addr;
        int size = KB(512);
 
        pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
        reg_addr &= 0xfff80000;
 
-       if (INTEL_GTT_GEN >= 7)
-               size = MB(2);
-
        intel_private.registers = ioremap(reg_addr, size);
        if (!intel_private.registers)
                return -ENOMEM;
 
-       if (INTEL_GTT_GEN == 3) {
-               u32 gtt_addr;
-
+       switch (INTEL_GTT_GEN) {
+       case 3:
                pci_read_config_dword(intel_private.pcidev,
                                      I915_PTEADDR, &gtt_addr);
                intel_private.gtt_bus_addr = gtt_addr;
-       } else {
-               u32 gtt_offset;
-
-               switch (INTEL_GTT_GEN) {
-               case 5:
-               case 6:
-               case 7:
-                       gtt_offset = MB(2);
-                       break;
-               case 4:
-               default:
-                       gtt_offset =  KB(512);
-                       break;
-               }
-               intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+               break;
+       case 5:
+               intel_private.gtt_bus_addr = reg_addr + MB(2);
+               break;
+       default:
+               intel_private.gtt_bus_addr = reg_addr + KB(512);
+               break;
        }
 
        if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
        .check_flags = i830_check_flags,
        .chipset_flush = i9xx_chipset_flush,
 };
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
-       .gen = 6,
-       .setup = i9xx_setup,
-       .cleanup = gen6_cleanup,
-       .write_entry = gen6_write_entry,
-       .dma_mask_size = 40,
-       .check_flags = gen6_check_flags,
-       .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
-       .gen = 6,
-       .setup = i9xx_setup,
-       .cleanup = gen6_cleanup,
-       .write_entry = haswell_write_entry,
-       .dma_mask_size = 40,
-       .check_flags = gen6_check_flags,
-       .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
-       .gen = 7,
-       .setup = i9xx_setup,
-       .cleanup = gen6_cleanup,
-       .write_entry = valleyview_write_entry,
-       .dma_mask_size = 40,
-       .check_flags = gen6_check_flags,
-};
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
            "HD Graphics", &ironlake_gtt_driver },
        { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
            "HD Graphics", &ironlake_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-           "Sandybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
-           "Ivybridge", &sandybridge_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
-           "ValleyView", &valleyview_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-           "Haswell", &haswell_gtt_driver },
-       { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-           "Haswell", &haswell_gtt_driver },
        { 0, NULL, NULL }
 };
 
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
        return &intel_private.base;
 }
index 7da840d..9978609 100644
@@ -38,8 +38,6 @@ static struct vio_device_id tpm_ibmvtpm_device_table[] = {
 };
 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
 
-DECLARE_WAIT_QUEUE_HEAD(wq);
-
 /**
  * ibmvtpm_send_crq - Send a CRQ request
  * @vdev:      vio device struct
@@ -83,6 +81,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
        struct ibmvtpm_dev *ibmvtpm;
        u16 len;
+       int sig;
 
        ibmvtpm = (struct ibmvtpm_dev *)chip->vendor.data;
 
@@ -91,22 +90,23 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
                return 0;
        }
 
-       wait_event_interruptible(wq, ibmvtpm->crq_res.len != 0);
+       sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0);
+       if (sig)
+               return -EINTR;
+
+       len = ibmvtpm->res_len;
 
-       if (count < ibmvtpm->crq_res.len) {
+       if (count < len) {
                dev_err(ibmvtpm->dev,
                        "Invalid size in recv: count=%ld, crq_size=%d\n",
-                       count, ibmvtpm->crq_res.len);
+                       count, len);
                return -EIO;
        }
 
        spin_lock(&ibmvtpm->rtce_lock);
-       memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, ibmvtpm->crq_res.len);
-       memset(ibmvtpm->rtce_buf, 0, ibmvtpm->crq_res.len);
-       ibmvtpm->crq_res.valid = 0;
-       ibmvtpm->crq_res.msg = 0;
-       len = ibmvtpm->crq_res.len;
-       ibmvtpm->crq_res.len = 0;
+       memcpy((void *)buf, (void *)ibmvtpm->rtce_buf, len);
+       memset(ibmvtpm->rtce_buf, 0, len);
+       ibmvtpm->res_len = 0;
        spin_unlock(&ibmvtpm->rtce_lock);
        return len;
 }
@@ -273,7 +273,6 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
        int rc = 0;
 
        free_irq(vdev->irq, ibmvtpm);
-       tasklet_kill(&ibmvtpm->tasklet);
 
        do {
                if (rc)
@@ -372,7 +371,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
 static int tpm_ibmvtpm_resume(struct device *dev)
 {
        struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(dev);
-       unsigned long flags;
        int rc = 0;
 
        do {
@@ -387,10 +385,11 @@ static int tpm_ibmvtpm_resume(struct device *dev)
                return rc;
        }
 
-       spin_lock_irqsave(&ibmvtpm->lock, flags);
-       vio_disable_interrupts(ibmvtpm->vdev);
-       tasklet_schedule(&ibmvtpm->tasklet);
-       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+       rc = vio_enable_interrupts(ibmvtpm->vdev);
+       if (rc) {
+               dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+               return rc;
+       }
 
        rc = ibmvtpm_crq_send_init(ibmvtpm);
        if (rc)
@@ -467,7 +466,7 @@ static struct ibmvtpm_crq *ibmvtpm_crq_get_next(struct ibmvtpm_dev *ibmvtpm)
        if (crq->valid & VTPM_MSG_RES) {
                if (++crq_q->index == crq_q->num_entry)
                        crq_q->index = 0;
-               rmb();
+               smp_rmb();
        } else
                crq = NULL;
        return crq;
@@ -535,11 +534,9 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
                        ibmvtpm->vtpm_version = crq->data;
                        return;
                case VTPM_TPM_COMMAND_RES:
-                       ibmvtpm->crq_res.valid = crq->valid;
-                       ibmvtpm->crq_res.msg = crq->msg;
-                       ibmvtpm->crq_res.len = crq->len;
-                       ibmvtpm->crq_res.data = crq->data;
-                       wake_up_interruptible(&wq);
+                       /* len of the data in rtce buffer */
+                       ibmvtpm->res_len = crq->len;
+                       wake_up_interruptible(&ibmvtpm->wq);
                        return;
                default:
                        return;
@@ -559,38 +556,19 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
 static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
 {
        struct ibmvtpm_dev *ibmvtpm = (struct ibmvtpm_dev *) vtpm_instance;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ibmvtpm->lock, flags);
-       vio_disable_interrupts(ibmvtpm->vdev);
-       tasklet_schedule(&ibmvtpm->tasklet);
-       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
-
-       return IRQ_HANDLED;
-}
-
-/**
- * ibmvtpm_tasklet - Interrupt handler tasklet
- * @data:      ibm vtpm device struct
- *
- * Returns:
- *     Nothing
- **/
-static void ibmvtpm_tasklet(void *data)
-{
-       struct ibmvtpm_dev *ibmvtpm = data;
        struct ibmvtpm_crq *crq;
-       unsigned long flags;
 
-       spin_lock_irqsave(&ibmvtpm->lock, flags);
+       /* A while loop is needed for the initial setup (get version and
+        * get rtce_size). There should be only one TPM request at any
+        * given time.
+        */
        while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
                ibmvtpm_crq_process(crq, ibmvtpm);
                crq->valid = 0;
-               wmb();
+               smp_wmb();
        }
 
-       vio_enable_interrupts(ibmvtpm->vdev);
-       spin_unlock_irqrestore(&ibmvtpm->lock, flags);
+       return IRQ_HANDLED;
 }
 
 /**
@@ -650,9 +628,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto reg_crq_cleanup;
        }
 
-       tasklet_init(&ibmvtpm->tasklet, (void *)ibmvtpm_tasklet,
-                    (unsigned long)ibmvtpm);
-
        rc = request_irq(vio_dev->irq, ibmvtpm_interrupt, 0,
                         tpm_ibmvtpm_driver_name, ibmvtpm);
        if (rc) {
@@ -666,13 +641,14 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
+       init_waitqueue_head(&ibmvtpm->wq);
+
        crq_q->index = 0;
 
        ibmvtpm->dev = dev;
        ibmvtpm->vdev = vio_dev;
        chip->vendor.data = (void *)ibmvtpm;
 
-       spin_lock_init(&ibmvtpm->lock);
        spin_lock_init(&ibmvtpm->rtce_lock);
 
        rc = ibmvtpm_crq_send_init(ibmvtpm);
@@ -689,7 +665,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 
        return rc;
 init_irq_cleanup:
-       tasklet_kill(&ibmvtpm->tasklet);
        do {
                rc1 = plpar_hcall_norets(H_FREE_CRQ, vio_dev->unit_address);
        } while (rc1 == H_BUSY || H_IS_LONG_BUSY(rc1));
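
A userspace analog of the wq/res_len handshake that replaces the tasklet
(hypothetical pthreads sketch; build with cc -pthread): the receive path
sleeps until the interrupt path records a response length and wakes it, the
roles played in the driver by wait_event_interruptible() and
wake_up_interruptible().

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static unsigned int res_len;   /* plays the role of ibmvtpm->res_len */

    static void *irq_path(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            res_len = 42;                 /* length of data in the buffer */
            pthread_cond_signal(&cond);   /* wake_up_interruptible() */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, irq_path, NULL);
            pthread_mutex_lock(&lock);
            while (res_len == 0)          /* wait_event_interruptible() */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            printf("got response, len=%u\n", res_len);
            pthread_join(t, NULL);
            return 0;
    }
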
index 4296eb4..bd82a79 100644
@@ -38,13 +38,12 @@ struct ibmvtpm_dev {
        struct vio_dev *vdev;
        struct ibmvtpm_crq_queue crq_queue;
        dma_addr_t crq_dma_handle;
-       spinlock_t lock;
-       struct tasklet_struct tasklet;
        u32 rtce_size;
        void __iomem *rtce_buf;
        dma_addr_t rtce_dma_handle;
        spinlock_t rtce_lock;
-       struct ibmvtpm_crq crq_res;
+       wait_queue_head_t wq;
+       u16 res_len;
        u32 vtpm_version;
 };
 
index f10f05d..414aed5 100644
@@ -166,6 +166,7 @@ static irqreturn_t arizona_micdet(int irq, void *data)
        ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
        if (ret != 0) {
                dev_err(arizona->dev, "Failed to read MICDET: %d\n", ret);
+               mutex_unlock(&info->lock);
                return IRQ_NONE;
        }
 
index d398821..60adc04 100644
@@ -472,7 +472,7 @@ int extcon_register_interest(struct extcon_specific_cable_nb *obj,
 
                obj->cable_index = extcon_find_cable_index(obj->edev, cable_name);
                if (obj->cable_index < 0)
-                       return -ENODEV;
+                       return obj->cable_index;
 
                obj->user_nb = nb;
 
index b656dfa..8c17b65 100644
@@ -657,17 +657,17 @@ static int max77693_muic_probe(struct platform_device *pdev)
        int ret, i;
        u8 id;
 
-       info = kzalloc(sizeof(struct max77693_muic_info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
+                                  GFP_KERNEL);
        if (!info) {
                dev_err(&pdev->dev, "failed to allocate memory\n");
-               ret = -ENOMEM;
-               goto err_kfree;
+               return -ENOMEM;
        }
        info->dev = &pdev->dev;
        info->max77693 = max77693;
-       if (info->max77693->regmap_muic)
+       if (info->max77693->regmap_muic) {
                dev_dbg(&pdev->dev, "allocate register map\n");
-       else {
+       } else {
                info->max77693->regmap_muic = devm_regmap_init_i2c(
                                                info->max77693->muic,
                                                &max77693_muic_regmap_config);
@@ -675,7 +675,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
                        ret = PTR_ERR(info->max77693->regmap_muic);
                        dev_err(max77693->dev,
                                "failed to allocate register map: %d\n", ret);
-                       goto err_regmap;
+                       return ret;
                }
        }
        platform_set_drvdata(pdev, info);
@@ -686,11 +686,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
        /* Support irq domain for MAX77693 MUIC device */
        for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
                struct max77693_muic_irq *muic_irq = &muic_irqs[i];
-               int virq = 0;
+               unsigned int virq = 0;
 
                virq = irq_create_mapping(max77693->irq_domain, muic_irq->irq);
-               if (!virq)
+               if (!virq) {
+                       ret = -EINVAL;
                        goto err_irq;
+               }
                muic_irq->virq = virq;
 
                ret = request_threaded_irq(virq, NULL,
@@ -702,14 +704,13 @@ static int max77693_muic_probe(struct platform_device *pdev)
                                " error :%d)\n",
                                muic_irq->irq, ret);
 
-                       for (i = i - 1; i >= 0; i--)
-                               free_irq(muic_irq->virq, info);
                        goto err_irq;
                }
        }
 
        /* Initialize extcon device */
-       info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+       info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+                                 GFP_KERNEL);
        if (!info->edev) {
                dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
                ret = -ENOMEM;
@@ -720,7 +721,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
        ret = extcon_dev_register(info->edev, NULL);
        if (ret) {
                dev_err(&pdev->dev, "failed to register extcon device\n");
-               goto err_extcon;
+               goto err_irq;
        }
 
        /* Initialize MUIC register by using platform data */
@@ -753,7 +754,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
                        MAX77693_MUIC_REG_ID, &id);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to read revision number\n");
-               goto err_extcon;
+               goto err_irq;
        }
        dev_info(info->dev, "device ID : 0x%x\n", id);
 
@@ -765,12 +766,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
 
        return ret;
 
-err_extcon:
-       kfree(info->edev);
 err_irq:
-err_regmap:
-       kfree(info);
-err_kfree:
+       while (--i >= 0)
+               free_irq(muic_irqs[i].virq, info);
        return ret;
 }
 
@@ -783,8 +781,6 @@ static int max77693_muic_remove(struct platform_device *pdev)
                free_irq(muic_irqs[i].virq, info);
        cancel_work_sync(&info->irq_work);
        extcon_dev_unregister(info->edev);
-       kfree(info->edev);
-       kfree(info);
 
        return 0;
 }
index bad76f5..93009fe 100644
@@ -1,7 +1,7 @@
 /*
  * extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
  *
- *  Copyright (C) 2012 Samsung Electrnoics
+ *  Copyright (C) 2012 Samsung Electronics
  *  Donggeun Kim <dg77.kim@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -433,11 +433,11 @@ static int max8997_muic_probe(struct platform_device *pdev)
        struct max8997_muic_info *info;
        int ret, i;
 
-       info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+       info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
+                           GFP_KERNEL);
        if (!info) {
                dev_err(&pdev->dev, "failed to allocate memory\n");
-               ret = -ENOMEM;
-               goto err_kfree;
+               return -ENOMEM;
        }
 
        info->dev = &pdev->dev;
@@ -450,14 +450,16 @@ static int max8997_muic_probe(struct platform_device *pdev)
 
        for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
                struct max8997_muic_irq *muic_irq = &muic_irqs[i];
-               int virq = 0;
+               unsigned int virq = 0;
 
                virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
-               if (!virq)
+               if (!virq) {
+                       ret = -EINVAL;
                        goto err_irq;
+               }
                muic_irq->virq = virq;
 
-               ret = request_threaded_irq(virq, NULL,max8997_muic_irq_handler,
+               ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
                                0, muic_irq->name, info);
                if (ret) {
                        dev_err(&pdev->dev,
@@ -469,7 +471,8 @@ static int max8997_muic_probe(struct platform_device *pdev)
        }
 
        /* External connector */
-       info->edev = kzalloc(sizeof(struct extcon_dev), GFP_KERNEL);
+       info->edev = devm_kzalloc(&pdev->dev, sizeof(struct extcon_dev),
+                                 GFP_KERNEL);
        if (!info->edev) {
                dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
                ret = -ENOMEM;
@@ -480,7 +483,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
        ret = extcon_dev_register(info->edev, NULL);
        if (ret) {
                dev_err(&pdev->dev, "failed to register extcon device\n");
-               goto err_extcon;
+               goto err_irq;
        }
 
        /* Initialize registers according to platform data */
@@ -498,13 +501,9 @@ static int max8997_muic_probe(struct platform_device *pdev)
 
        return ret;
 
-err_extcon:
-       kfree(info->edev);
 err_irq:
        while (--i >= 0)
                free_irq(muic_irqs[i].virq, info);
-       kfree(info);
-err_kfree:
        return ret;
 }
 
@@ -519,9 +518,6 @@ static int max8997_muic_remove(struct platform_device *pdev)
 
        extcon_dev_unregister(info->edev);
 
-       kfree(info->edev);
-       kfree(info);
-
        return 0;
 }
 
index bf892bd..8ae1f5b 100644 (file)
@@ -683,4 +683,17 @@ config GPIO_MSIC
          Enable support for GPIO on intel MSIC controllers found in
          intel MID devices
 
+comment "USB GPIO expanders:"
+
+config GPIO_VIPERBOARD
+       tristate "Viperboard GPIO a & b support"
+       depends on MFD_VIPERBOARD && USB
+       help
+         Say yes here to access the GPIO signals of Nano River
+         Technologies Viperboard. There are two GPIO chips on the
+         board: gpioa and gpiob.
+         See the viperboard API specification and Nano
+         River Tech's viperboard.h for the detailed meaning
+         of the module parameters.
+
 endif
index 76b3446..c5aebd0 100644 (file)
@@ -76,6 +76,7 @@ obj-$(CONFIG_GPIO_TS5500)     += gpio-ts5500.o
 obj-$(CONFIG_GPIO_TWL4030)     += gpio-twl4030.o
 obj-$(CONFIG_GPIO_TWL6040)     += gpio-twl6040.o
 obj-$(CONFIG_GPIO_UCB1400)     += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VIPERBOARD)  += gpio-viperboard.o
 obj-$(CONFIG_GPIO_VR41XX)      += gpio-vr41xx.o
 obj-$(CONFIG_GPIO_VT8500)      += gpio-vt8500.o
 obj-$(CONFIG_GPIO_VX855)       += gpio-vx855.o
index a05aacd..29b11e9 100644 (file)
@@ -185,7 +185,11 @@ static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
        struct da9052_gpio *gpio = to_da9052_gpio(gc);
        struct da9052 *da9052 = gpio->da9052;
 
-       return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+       int irq;
+
+       irq = regmap_irq_get_virq(da9052->irq_data, DA9052_IRQ_GPI0 + offset);
+
+       return irq;
 }
 
 static struct gpio_chip reference_gp = {
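With irq numbers now coming from regmap-irq, a DA9052 GPI interrupt can no longer be computed from a static irq_base; consumers resolve it at runtime. A minimal sketch, assuming a hypothetical handler and device pointer:

	/* sketch only: gpio, handler and dev are placeholders */
	int irq = gpio_to_irq(gpio);	/* routed to da9052_gpio_to_irq() */
	if (irq < 0)
		return irq;
	return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT,
				    "da9052-gpi", dev);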
index c1b82da..29e8e75 100644 (file)
@@ -80,6 +80,14 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
                                val, mask);
 }
 
+static int tps6586x_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+       struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+       return tps6586x_irq_get_virq(tps6586x_gpio->parent,
+                               TPS6586X_INT_PLDO_0 + offset);
+}
+
 static int tps6586x_gpio_probe(struct platform_device *pdev)
 {
        struct tps6586x_platform_data *pdata;
@@ -106,6 +114,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
        tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
        tps6586x_gpio->gpio_chip.set    = tps6586x_gpio_set;
        tps6586x_gpio->gpio_chip.get    = tps6586x_gpio_get;
+       tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
 
 #ifdef CONFIG_OF_GPIO
        tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
index 00329f2..9572aa1 100644 (file)
@@ -355,13 +355,13 @@ static struct gpio_chip twl_gpiochip = {
 
 static int gpio_twl4030_pulls(u32 ups, u32 downs)
 {
-       u8              message[6];
+       u8              message[5];
        unsigned        i, gpio_bit;
 
        /* For most pins, a pulldown was enabled by default.
         * We should have data that's specific to this board.
         */
-       for (gpio_bit = 1, i = 1; i < 6; i++) {
+       for (gpio_bit = 1, i = 0; i < 5; i++) {
                u8              bit_mask;
                unsigned        j;
 
@@ -380,16 +380,16 @@ static int gpio_twl4030_pulls(u32 ups, u32 downs)
 
 static int gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
 {
-       u8              message[4];
+       u8              message[3];
 
        /* 30 msec of debouncing is always used for MMC card detect,
         * and is optional for everything else.
         */
-       message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
+       message[0] = (debounce & 0xff) | (mmc_cd & 0x03);
        debounce >>= 8;
-       message[2] = (debounce & 0xff);
+       message[1] = (debounce & 0xff);
        debounce >>= 8;
-       message[3] = (debounce & 0x03);
+       message[2] = (debounce & 0x03);
 
        return twl_i2c_write(TWL4030_MODULE_GPIO, message,
                                REG_GPIO_DEBEN1, 3);
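The index shift above apparently follows from twl_i2c_write() no longer reserving the first buffer byte for its own use, so payloads are now packed from index 0. For clarity, the resulting byte-to-register mapping (register meanings inferred from the DEBEN1..DEBEN3 layout):

	message[0] = (debounce & 0xff) | (mmc_cd & 0x03);  /* DEBEN1: GPIO0..7, MMC CD */
	message[1] = (debounce >> 8) & 0xff;               /* DEBEN2: GPIO8..15 */
	message[2] = (debounce >> 16) & 0x03;              /* DEBEN3: GPIO16..17 */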
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
new file mode 100644 (file)
index 0000000..1377299
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ *  Nano River Technologies viperboard GPIO lib driver
+ *
+ *  (C) 2012 by Lemonage GmbH
+ *  Author: Lars Poeschel <poeschel@lemonage.de>
+ *  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_GPIOA_CLK_1MHZ          0
+#define VPRBRD_GPIOA_CLK_100KHZ                1
+#define VPRBRD_GPIOA_CLK_10KHZ         2
+#define VPRBRD_GPIOA_CLK_1KHZ          3
+#define VPRBRD_GPIOA_CLK_100HZ         4
+#define VPRBRD_GPIOA_CLK_10HZ          5
+
+#define VPRBRD_GPIOA_FREQ_DEFAULT      1000
+
+#define VPRBRD_GPIOA_CMD_CONT          0x00
+#define VPRBRD_GPIOA_CMD_PULSE         0x01
+#define VPRBRD_GPIOA_CMD_PWM           0x02
+#define VPRBRD_GPIOA_CMD_SETOUT                0x03
+#define VPRBRD_GPIOA_CMD_SETIN         0x04
+#define VPRBRD_GPIOA_CMD_SETINT                0x05
+#define VPRBRD_GPIOA_CMD_GETIN         0x06
+
+#define VPRBRD_GPIOB_CMD_SETDIR                0x00
+#define VPRBRD_GPIOB_CMD_SETVAL                0x01
+
+struct vprbrd_gpioa_msg {
+       u8 cmd;
+       u8 clk;
+       u8 offset;
+       u8 t1;
+       u8 t2;
+       u8 invert;
+       u8 pwmlevel;
+       u8 outval;
+       u8 risefall;
+       u8 answer;
+       u8 __fill;
+} __packed;
+
+struct vprbrd_gpiob_msg {
+       u8 cmd;
+       u16 val;
+       u16 mask;
+} __packed;
+
+struct vprbrd_gpio {
+       struct gpio_chip gpioa; /* gpio a related things */
+       u32 gpioa_out;
+       u32 gpioa_val;
+       struct gpio_chip gpiob; /* gpio b related things */
+       u32 gpiob_out;
+       u32 gpiob_val;
+       struct vprbrd *vb;
+};
+
+/* gpioa sampling clock module parameter */
+static unsigned char gpioa_clk;
+static unsigned int gpioa_freq = VPRBRD_GPIOA_FREQ_DEFAULT;
+module_param(gpioa_freq, uint, 0);
+MODULE_PARM_DESC(gpioa_freq,
+       "gpio-a sampling freq in Hz (default is 1000 Hz); valid values: 10, 100, 1000, 10000, 100000, 1000000");
+
+/* ----- begin of gpio a chip -------------------------------------------- */
+
+static int vprbrd_gpioa_get(struct gpio_chip *chip,
+               unsigned offset)
+{
+       int ret, answer, error = 0;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpioa);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+       /* if io is set to output, just return the saved value */
+       if (gpio->gpioa_out & (1 << offset))
+               return gpio->gpioa_val & (1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       gamsg->cmd = VPRBRD_GPIOA_CMD_GETIN;
+       gamsg->clk = 0x00;
+       gamsg->offset = offset;
+       gamsg->t1 = 0x00;
+       gamsg->t2 = 0x00;
+       gamsg->invert = 0x00;
+       gamsg->pwmlevel = 0x00;
+       gamsg->outval = 0x00;
+       gamsg->risefall = 0x00;
+       gamsg->answer = 0x00;
+       gamsg->__fill = 0x00;
+
+       ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+               0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+       if (ret != sizeof(struct vprbrd_gpioa_msg))
+               error = -EREMOTEIO;
+
+       ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_IN, 0x0000,
+               0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+       answer = gamsg->answer & 0x01;
+
+       mutex_unlock(&vb->lock);
+
+       if (ret != sizeof(struct vprbrd_gpioa_msg))
+               error = -EREMOTEIO;
+
+       if (error)
+               return error;
+
+       return answer;
+}
+
+static void vprbrd_gpioa_set(struct gpio_chip *chip,
+               unsigned offset, int value)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpioa);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+       if (gpio->gpioa_out & (1 << offset)) {
+               if (value)
+                       gpio->gpioa_val |= (1 << offset);
+               else
+                       gpio->gpioa_val &= ~(1 << offset);
+
+               mutex_lock(&vb->lock);
+
+               gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+               gamsg->clk = 0x00;
+               gamsg->offset = offset;
+               gamsg->t1 = 0x00;
+               gamsg->t2 = 0x00;
+               gamsg->invert = 0x00;
+               gamsg->pwmlevel = 0x00;
+               gamsg->outval = value;
+               gamsg->risefall = 0x00;
+               gamsg->answer = 0x00;
+               gamsg->__fill = 0x00;
+
+               ret = usb_control_msg(vb->usb_dev,
+                       usb_sndctrlpipe(vb->usb_dev, 0),
+                       VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+                       0x0000, 0x0000, gamsg,
+                       sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
+
+               mutex_unlock(&vb->lock);
+
+               if (ret != sizeof(struct vprbrd_gpioa_msg))
+                       dev_err(chip->dev, "usb error setting pin value\n");
+       }
+}
+
+static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
+                       unsigned offset)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpioa);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+       gpio->gpioa_out &= ~(1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       gamsg->cmd = VPRBRD_GPIOA_CMD_SETIN;
+       gamsg->clk = gpioa_clk;
+       gamsg->offset = offset;
+       gamsg->t1 = 0x00;
+       gamsg->t2 = 0x00;
+       gamsg->invert = 0x00;
+       gamsg->pwmlevel = 0x00;
+       gamsg->outval = 0x00;
+       gamsg->risefall = 0x00;
+       gamsg->answer = 0x00;
+       gamsg->__fill = 0x00;
+
+       ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+               0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+
+       mutex_unlock(&vb->lock);
+
+       if (ret != sizeof(struct vprbrd_gpioa_msg))
+               return -EREMOTEIO;
+
+       return 0;
+}
+
+static int vprbrd_gpioa_direction_output(struct gpio_chip *chip,
+                       unsigned offset, int value)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpioa);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
+
+       gpio->gpioa_out |= (1 << offset);
+       if (value)
+               gpio->gpioa_val |= (1 << offset);
+       else
+               gpio->gpioa_val &= ~(1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+       gamsg->clk = 0x00;
+       gamsg->offset = offset;
+       gamsg->t1 = 0x00;
+       gamsg->t2 = 0x00;
+       gamsg->invert = 0x00;
+       gamsg->pwmlevel = 0x00;
+       gamsg->outval = value;
+       gamsg->risefall = 0x00;
+       gamsg->answer = 0x00;
+       gamsg->__fill = 0x00;
+
+       ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT, 0x0000,
+               0x0000, gamsg, sizeof(struct vprbrd_gpioa_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+
+       mutex_unlock(&vb->lock);
+
+       if (ret != sizeof(struct vprbrd_gpioa_msg))
+               return -EREMOTEIO;
+
+       return 0;
+}
+
+/* ----- end of gpio a chip ---------------------------------------------- */
+
+/* ----- begin of gpio b chip -------------------------------------------- */
+
+static int vprbrd_gpiob_setdir(struct vprbrd *vb, unsigned offset,
+       unsigned dir)
+{
+       struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+       int ret;
+
+       gbmsg->cmd = VPRBRD_GPIOB_CMD_SETDIR;
+       gbmsg->val = cpu_to_be16(dir << offset);
+       gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+       ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT, 0x0000,
+               0x0000, gbmsg, sizeof(struct vprbrd_gpiob_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+
+       if (ret != sizeof(struct vprbrd_gpiob_msg))
+               return -EREMOTEIO;
+
+       return 0;
+}
+
+static int vprbrd_gpiob_get(struct gpio_chip *chip,
+               unsigned offset)
+{
+       int ret;
+       u16 val;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpiob);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+       /* if io is set to output, just return the saved value */
+       if (gpio->gpiob_out & (1 << offset))
+               return gpio->gpiob_val & (1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_IN, 0x0000,
+               0x0000, gbmsg,  sizeof(struct vprbrd_gpiob_msg),
+               VPRBRD_USB_TIMEOUT_MS);
+       val = gbmsg->val;
+
+       mutex_unlock(&vb->lock);
+
+       if (ret != sizeof(struct vprbrd_gpiob_msg))
+               return ret;
+
+       /* cache the read values */
+       gpio->gpiob_val = be16_to_cpu(val);
+
+       return (gpio->gpiob_val >> offset) & 0x1;
+}
+
+static void vprbrd_gpiob_set(struct gpio_chip *chip,
+               unsigned offset, int value)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpiob);
+       struct vprbrd *vb = gpio->vb;
+       struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
+
+       if (gpio->gpiob_out & (1 << offset)) {
+               if (value)
+                       gpio->gpiob_val |= (1 << offset);
+               else
+                       gpio->gpiob_val &= ~(1 << offset);
+
+               mutex_lock(&vb->lock);
+
+               gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+               gbmsg->val = cpu_to_be16(value << offset);
+               gbmsg->mask = cpu_to_be16(0x0001 << offset);
+
+               ret = usb_control_msg(vb->usb_dev,
+                       usb_sndctrlpipe(vb->usb_dev, 0),
+                       VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+                       0x0000, 0x0000, gbmsg,
+                       sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+
+               mutex_unlock(&vb->lock);
+
+               if (ret != sizeof(struct vprbrd_gpiob_msg))
+                       dev_err(chip->dev, "usb error setting pin value\n");
+       }
+}
+
+static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
+                       unsigned offset)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpiob);
+       struct vprbrd *vb = gpio->vb;
+
+       gpio->gpiob_out &= ~(1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       ret = vprbrd_gpiob_setdir(vb, offset, 0);
+
+       mutex_unlock(&vb->lock);
+
+       if (ret)
+               dev_err(chip->dev, "usb error setting pin to input\n");
+
+       return ret;
+}
+
+static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
+                       unsigned offset, int value)
+{
+       int ret;
+       struct vprbrd_gpio *gpio =
+                       container_of(chip, struct vprbrd_gpio, gpiob);
+       struct vprbrd *vb = gpio->vb;
+
+       gpio->gpiob_out |= (1 << offset);
+       if (value)
+               gpio->gpiob_val |= (1 << offset);
+       else
+               gpio->gpiob_val &= ~(1 << offset);
+
+       mutex_lock(&vb->lock);
+
+       ret = vprbrd_gpiob_setdir(vb, offset, 1);
+       if (ret)
+               dev_err(chip->dev, "usb error setting pin to output\n");
+
+       mutex_unlock(&vb->lock);
+
+       vprbrd_gpiob_set(chip, offset, value);
+
+       return ret;
+}
+
+/* ----- end of gpio b chip ---------------------------------------------- */
+
+static int __devinit vprbrd_gpio_probe(struct platform_device *pdev)
+{
+       struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+       struct vprbrd_gpio *vb_gpio;
+       int ret;
+
+       vb_gpio = devm_kzalloc(&pdev->dev, sizeof(*vb_gpio), GFP_KERNEL);
+       if (vb_gpio == NULL)
+               return -ENOMEM;
+
+       vb_gpio->vb = vb;
+       /* registering gpio a */
+       vb_gpio->gpioa.label = "viperboard gpio a";
+       vb_gpio->gpioa.dev = &pdev->dev;
+       vb_gpio->gpioa.owner = THIS_MODULE;
+       vb_gpio->gpioa.base = -1;
+       vb_gpio->gpioa.ngpio = 16;
+       vb_gpio->gpioa.can_sleep = 1;
+       vb_gpio->gpioa.set = vprbrd_gpioa_set;
+       vb_gpio->gpioa.get = vprbrd_gpioa_get;
+       vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
+       vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
+       ret = gpiochip_add(&vb_gpio->gpioa);
+       if (ret < 0) {
+               dev_err(vb_gpio->gpioa.dev, "could not add gpio a\n");
+               goto err_gpioa;
+       }
+
+       /* registering gpio b */
+       vb_gpio->gpiob.label = "viperboard gpio b";
+       vb_gpio->gpiob.dev = &pdev->dev;
+       vb_gpio->gpiob.owner = THIS_MODULE;
+       vb_gpio->gpiob.base = -1;
+       vb_gpio->gpiob.ngpio = 16;
+       vb_gpio->gpiob.can_sleep = 1;
+       vb_gpio->gpiob.set = vprbrd_gpiob_set;
+       vb_gpio->gpiob.get = vprbrd_gpiob_get;
+       vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
+       vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
+       ret = gpiochip_add(&vb_gpio->gpiob);
+       if (ret < 0) {
+               dev_err(vb_gpio->gpiob.dev, "could not add gpio b\n");
+               goto err_gpiob;
+       }
+
+       platform_set_drvdata(pdev, vb_gpio);
+
+       return ret;
+
+err_gpiob:
+       ret = gpiochip_remove(&vb_gpio->gpioa);
+
+err_gpioa:
+       return ret;
+}
+
+static int __devexit vprbrd_gpio_remove(struct platform_device *pdev)
+{
+       struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = gpiochip_remove(&vb_gpio->gpiob);
+       if (ret == 0)
+               ret = gpiochip_remove(&vb_gpio->gpioa);
+
+       return ret;
+}
+
+static struct platform_driver vprbrd_gpio_driver = {
+       .driver.name    = "viperboard-gpio",
+       .driver.owner   = THIS_MODULE,
+       .probe          = vprbrd_gpio_probe,
+       .remove         = __devexit_p(vprbrd_gpio_remove),
+};
+
+static int __init vprbrd_gpio_init(void)
+{
+       switch (gpioa_freq) {
+       case 1000000:
+               gpioa_clk = VPRBRD_GPIOA_CLK_1MHZ;
+               break;
+       case 100000:
+               gpioa_clk = VPRBRD_GPIOA_CLK_100KHZ;
+               break;
+       case 10000:
+               gpioa_clk = VPRBRD_GPIOA_CLK_10KHZ;
+               break;
+       case 1000:
+               gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+               break;
+       case 100:
+               gpioa_clk = VPRBRD_GPIOA_CLK_100HZ;
+               break;
+       case 10:
+               gpioa_clk = VPRBRD_GPIOA_CLK_10HZ;
+               break;
+       default:
+               pr_warn("invalid gpioa_freq (%u)\n", gpioa_freq);
+               gpioa_clk = VPRBRD_GPIOA_CLK_1KHZ;
+       }
+
+       return platform_driver_register(&vprbrd_gpio_driver);
+}
+subsys_initcall(vprbrd_gpio_init);
+
+static void __exit vprbrd_gpio_exit(void)
+{
+       platform_driver_unregister(&vprbrd_gpio_driver);
+}
+module_exit(vprbrd_gpio_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("GPIO driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-gpio");
index 18321b6..983201b 100644 (file)
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 source "drivers/gpu/drm/cirrus/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
index 2ff5cef..6f58c81 100644 (file)
@@ -8,7 +8,7 @@ drm-y       :=  drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
                drm_context.o drm_dma.o \
                drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
-               drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+               drm_agpsupport.o drm_scatter.o drm_pci.o \
                drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
                drm_crtc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y       :=      drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y                  += i2c/
index 1a026ac..3602731 100644 (file)
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 
 static int ast_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
 
        ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
                          ttm_bo_type_device, &astbo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+                         align >> PAGE_SHIFT, false, NULL, acc_size,
                          NULL, ast_bo_ttm_destroy);
        if (ret)
                return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
        ast_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
index 101e423..dcd1a8c 100644 (file)
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 };
 
 
-static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 {
        struct apertures_struct *ap;
        bool primary = false;
 
        ap = alloc_apertures(1);
+       if (!ap)
+               return -ENOMEM;
+
        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);
 
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
 #endif
        remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
        kfree(ap);
+
+       return 0;
 }
 
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-       cirrus_kick_out_firmware_fb(pdev);
+       int ret;
+
+       ret = cirrus_kick_out_firmware_fb(pdev);
+       if (ret)
+               return ret;
 
        return drm_get_pci_dev(pdev, ent, &driver);
 }
index bc83f83..1413a26 100644 (file)
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 
 static int cirrus_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
 
        ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
                          ttm_bo_type_device, &cirrusbo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+                         align >> PAGE_SHIFT, false, NULL, acc_size,
                          NULL, cirrus_bo_ttm_destroy);
        if (ret)
                return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
        cirrus_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
index ef1b221..f2d667b 100644 (file)
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
 
-       if (crtc->gamma_store) {
-               kfree(crtc->gamma_store);
-               crtc->gamma_store = NULL;
-       }
+       kfree(crtc->gamma_store);
+       crtc->gamma_store = NULL;
 
        drm_mode_object_put(dev, &crtc->base);
        list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
        connector->edid_blob_ptr = NULL;
+       connector->status = connector_status_unknown;
 
        list_add_tail(&connector->head, &dev->mode_config.connector_list);
        dev->mode_config.num_connector++;
 
        if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              dev->mode_config.edid_property,
                                              0);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dpms_property, 0);
 
  out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
 
        for (i = 0; i < num_planes; i++) {
                unsigned int width = r->width / (i != 0 ? hsub : 1);
+               unsigned int height = r->height / (i != 0 ? vsub : 1);
+               unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
 
                if (!r->handles[i]) {
                        DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
                        return -EINVAL;
                }
 
-               if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
+               if ((uint64_t) width * cpp > UINT_MAX)
+                       return -ERANGE;
+
+               if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
+                       return -ERANGE;
+
+               if (r->pitches[i] < width * cpp) {
                        DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
                        return -EINVAL;
                }
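The two new -ERANGE checks close a 32-bit wraparound hole in the pitch validation; an illustration with made-up numbers:

	/* without the cast, width * cpp is a 32-bit multiply and wraps to 0,
	 * so a bogus pitch of 0 would have passed the pitch check */
	unsigned int width = 0x40000000, cpp = 4;
	if ((uint64_t)width * cpp > UINT_MAX)	/* 4294967296 > UINT_MAX */
		return -ERANGE;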
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
+       if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+               DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
+               return -EINVAL;
+       }
+
        if ((config->min_width > r->width) || (r->width > config->max_width)) {
                DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
                          r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
 }
 EXPORT_SYMBOL(drm_property_destroy);
 
-void drm_connector_attach_property(struct drm_connector *connector,
-                              struct drm_property *property, uint64_t init_val)
-{
-       drm_object_attach_property(&connector->base, property, init_val);
-}
-EXPORT_SYMBOL(drm_connector_attach_property);
-
-int drm_connector_property_set_value(struct drm_connector *connector,
-                                 struct drm_property *property, uint64_t value)
-{
-       return drm_object_property_set_value(&connector->base, property, value);
-}
-EXPORT_SYMBOL(drm_connector_property_set_value);
-
-int drm_connector_property_get_value(struct drm_connector *connector,
-                                 struct drm_property *property, uint64_t *val)
-{
-       return drm_object_property_get_value(&connector->base, property, val);
-}
-EXPORT_SYMBOL(drm_connector_property_get_value);
-
 void drm_object_attach_property(struct drm_mode_object *obj,
                                struct drm_property *property,
                                uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
        /* Delete edid, when there is none. */
        if (!edid) {
                connector->edid_blob_ptr = NULL;
-               ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0);
+               ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
                return ret;
        }
 
        size = EDID_LENGTH * (1 + edid->extensions);
        connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
                                                            size, edid);
+       if (!connector->edid_blob_ptr)
+               return -EINVAL;
 
-       ret = drm_connector_property_set_value(connector,
+       ret = drm_object_property_set_value(&connector->base,
                                               dev->mode_config.edid_property,
                                               connector->edid_blob_ptr->base.id);
 
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
                for (i = 0; i < property->num_values; i++)
                        valid_mask |= (1ULL << property->values[i]);
                return !(value & ~valid_mask);
+       } else if (property->flags & DRM_MODE_PROP_BLOB) {
+               /* Only the driver knows */
+               return true;
        } else {
                int i;
                for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
 
        /* store the property value if successful */
        if (!ret)
-               drm_connector_property_set_value(connector, property, value);
+               drm_object_property_set_value(&connector->base, property, value);
        return ret;
 }
 
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               connector->status = connector_status_unknown;
+
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
+       }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
index 1227adf..7b2d378 100644 (file)
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+/**
+ * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
+ *                                             connector list
+ * @dev: drm device to operate on
+ *
+ * Some userspace presumes that the first connected connector is the main
+ * display, where it's supposed to display e.g. the login screen. For
+ * laptops, this should be the main panel. Use this function to sort all
+ * (eDP/LVDS) panels to the front of the connector list, instead of
+ * painstakingly trying to initialize them in the right order.
+ */
+void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
+{
+       struct drm_connector *connector, *tmp;
+       struct list_head panel_list;
+
+       INIT_LIST_HEAD(&panel_list);
+
+       list_for_each_entry_safe(connector, tmp,
+                                &dev->mode_config.connector_list, head) {
+               if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
+                   connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+                       list_move_tail(&connector->head, &panel_list);
+       }
+
+       list_splice(&panel_list, &dev->mode_config.connector_list);
+}
+EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
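A sketch of the intended call site, with the driver init function hypothetical: register every output first, then sort panels to the head so userspace sees the panel as the first connector:

	static int foo_modeset_init(struct drm_device *dev)
	{
		foo_init_crtcs_and_connectors(dev);	/* hypothetical driver code */
		drm_helper_move_panel_connectors_to_head(dev);
		return 0;
	}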
+
 static bool drm_kms_helper_poll = true;
 module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 
 /**
  * drm_helper_probe_single_connector_modes - get complete set of display modes
- * @dev: DRM device
+ * @connector: connector to probe
  * @maxX: max width for modes
  * @maxY: max height for modes
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Based on @dev's mode_config layout, scan all the connectors and try to detect
- * modes on them.  Modes will first be added to the connector's probed_modes
- * list, then culled (based on validity and the @maxX, @maxY parameters) and
- * put into the normal modes list.
+ * Based on the helper callbacks implemented by @connector try to detect all
+ * valid modes.  Modes will first be added to the connector's probed_modes list,
+ * then culled (based on validity and the @maxX, @maxY parameters) and put into
+ * the normal modes list.
  *
- * Intended to be used either at bootup time or when major configuration
- * changes have occurred.
- *
- * FIXME: take into account monitor limits
+ * Intended to be used as a generic implementation of the ->probe() @connector
+ * callback for drivers that use the crtc helpers for output mode filtering and
+ * detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                        connector->funcs->force(connector);
        } else {
                connector->status = connector->funcs->detect(connector, true);
-               drm_kms_helper_poll_enable(dev);
        }
 
+       /* Re-enable polling in case the global poll config changed. */
+       if (drm_kms_helper_poll != dev->mode_config.poll_running)
+               drm_kms_helper_poll_enable(dev);
+
+       dev->mode_config.poll_running = drm_kms_helper_poll;
+
        if (connector->status == connector_status_disconnected) {
                DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
                        connector->base.id, drm_get_connector_name(connector));
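In practice this helper is wired up as the connector's ->fill_modes hook (the "probe" callback mentioned above); a sketch with the other driver callbacks hypothetical:

	static const struct drm_connector_funcs foo_connector_funcs = {
		.dpms		= drm_helper_connector_dpms,
		.detect		= foo_connector_detect,		/* hypothetical */
		.fill_modes	= drm_helper_probe_single_connector_modes,
		.destroy	= drm_connector_cleanup,
	};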
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
 }
 
 /**
- * drm_crtc_set_mode - set a mode
+ * drm_crtc_helper_set_mode - internal helper to set a mode
  * @crtc: CRTC to program
  * @mode: mode to use
- * @x: width of mode
- * @y: height of mode
+ * @x: horizontal offset into the surface
+ * @y: vertical offset into the surface
+ * @old_fb: old framebuffer, for cleanup
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
  * Try to set @mode on @crtc.  Give @crtc and its associated connectors a chance
- * to fixup or reject the mode prior to trying to set it.
+ * to fixup or reject the mode prior to trying to set it. This is an internal
+ * helper that drivers could e.g. use to update properties that require the
+ * entire output pipe to be disabled and re-enabled in a new configuration, for
+ * example to change whether audio is enabled on an hdmi link or to change the
+ * panel fitter or dither attributes. It is also called by the
+ * drm_crtc_helper_set_config() helper function to drive the mode setting
+ * sequence.
  *
  * RETURNS:
  * True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
- * @crtc: CRTC to setup
- * @crtc_info: user provided configuration
- * @new_mode: new mode to set
- * @connector_set: set of connectors for the new config
- * @fb: new framebuffer
+ * @set: mode set configuration
  *
  * LOCKING:
  * Caller must hold mode config lock.
  *
- * Setup a new configuration, provided by the user in @crtc_info, and enable
- * it.
+ * Setup a new configuration, provided by the upper layers (either an ioctl call
+ * from userspace or internally e.g. from the fbdev support code) in @set, and
+ * enable it. This is the main helper function for drivers that implement
+ * kernel mode setting with the crtc helper functions and the assorted
+ * ->prepare(), ->modeset() and ->commit() helper callbacks.
  *
  * RETURNS:
- * Zero. (FIXME)
+ * Returns 0 on success, -ERRNO on failure.
  */
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
 }
 
 /**
- * drm_helper_connector_dpms
- * @connector affected connector
- * @mode DPMS mode
+ * drm_helper_connector_dpms() - connector dpms helper implementation
+ * @connector: affected connector
+ * @mode: DPMS mode
  *
- * Calls the low-level connector DPMS function, then
- * calls appropriate encoder and crtc DPMS functions as well
+ * This is the main helper function provided by the crtc helper framework for
+ * implementing the DPMS connector attribute. It computes the new desired DPMS
+ * state for all encoders and crtcs in the output mesh and calls the ->dpms()
+ * callback provided by the driver appropriately.
  */
 void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
 {
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
+void drm_kms_helper_hotplug_event(struct drm_device *dev)
+{
+       /* send a uevent + call fbdev */
+       drm_sysfs_hotplug_event(dev);
+       if (dev->mode_config.funcs->output_poll_changed)
+               dev->mode_config.funcs->output_poll_changed(dev);
+}
+EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
+
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 static void output_poll_execute(struct work_struct *work)
 {
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
        mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
-               /* if this is HPD or polled don't check it -
-                  TV out for instance */
-               if (!connector->polled)
+               /* Ignore forced connectors. */
+               if (connector->force)
                        continue;
 
-               else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT))
-                       repoll = true;
+               /* Ignore HPD capable connectors and connectors where we don't
+                * want any hotplug detection at all for polling. */
+               if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
+                       continue;
+
+               repoll = true;
 
                old_status = connector->status;
                /* if we are connected and don't want to poll for disconnect
                   skip it */
                if (old_status == connector_status_connected &&
-                   !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) &&
-                   !(connector->polled & DRM_CONNECTOR_POLL_HPD))
+                   !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
                        continue;
 
                connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
 
        mutex_unlock(&dev->mode_config.mutex);
 
-       if (changed) {
-               /* send a uevent + call fbdev */
-               drm_sysfs_hotplug_event(dev);
-               if (dev->mode_config.funcs->output_poll_changed)
-                       dev->mode_config.funcs->output_poll_changed(dev);
-       }
+       if (changed)
+               drm_kms_helper_hotplug_event(dev);
 
        if (repoll)
                schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
                return;
 
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->polled)
+               if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+                                        DRM_CONNECTOR_POLL_DISCONNECT))
                        poll = true;
        }
 
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
 void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
+       struct drm_connector *connector;
+       enum drm_connector_status old_status;
+       bool changed = false;
+
        if (!dev->mode_config.poll_enabled)
                return;
 
-       /* kill timer and schedule immediate execution, this doesn't block */
-       cancel_delayed_work(&dev->mode_config.output_poll_work);
-       if (drm_kms_helper_poll)
-               schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+               /* Only handle HPD capable connectors. */
+               if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+                       continue;
+
+               old_status = connector->status;
+
+               connector->status = connector->funcs->detect(connector, false);
+               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+                             connector->base.id,
+                             drm_get_connector_name(connector),
+                             old_status, connector->status);
+               if (old_status != connector->status)
+                       changed = true;
+       }
+
+       mutex_unlock(&dev->mode_config.mutex);
+
+       if (changed)
+               drm_kms_helper_hotplug_event(dev);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
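The reworked helper does real re-detection, so a driver calls it from process context, e.g. a work item scheduled by its hotplug interrupt; the struct and member names below are hypothetical:

	static void foo_hotplug_work_func(struct work_struct *work)
	{
		struct foo_device *foo =
			container_of(work, struct foo_device, hotplug_work);

		/* re-detects DRM_CONNECTOR_POLL_HPD connectors and fires the
		 * uevent/fbdev notification only if a status changed */
		drm_helper_hpd_irq_event(foo->drm_dev);
	}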
similarity index 58%
rename from drivers/gpu/drm/drm_dp_i2c_helper.c
rename to drivers/gpu/drm/drm_dp_helper.c
index 7f246f2..89e1966 100644 (file)
 #include <drm/drm_dp_helper.h>
 #include <drm/drmP.h>
 
+/**
+ * DOC: dp helpers
+ *
+ * These functions provide common logic and helpers at various abstraction
+ * levels for dealing with DisplayPort sink devices: DP aux channel transfers,
+ * EDID reading over DP aux channels, decoding certain DPCD blocks, and so on.
+ */
+
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
 {
        struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
        int ret;
-       
+
        ret = (*algo_data->aux_ch)(adapter, mode,
                                   write_byte, read_byte);
        return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
 {
        (void) i2c_algo_dp_aux_address(adapter, 0, false);
        (void) i2c_algo_dp_aux_stop(adapter, false);
-                                          
 }
 
 static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
        return 0;
 }
 
+/**
+ * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
+ * @adapter: i2c adapter to register
+ *
+ * This registers an i2c adapter that uses the dp aux channel as its underlying
+ * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
+ * and store it in the algo_data member of the @adapter argument. This will be
+ * used by the i2c over dp aux algorithm to drive the hardware.
+ *
+ * RETURNS:
+ * 0 on success, -ERRNO on failure.
+ */
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 {
        int error;
-       
+
        error = i2c_dp_aux_prepare_bus(adapter);
        if (error)
                return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
        return error;
 }
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
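A sketch of the registration the comment above describes, with the driver-side names hypothetical; only the aux_ch hook is strictly required by the algorithm:

	static struct i2c_algo_dp_aux_data foo_aux_data = {
		.aux_ch = foo_aux_ch,	/* driver's raw AUX transfer routine */
	};

	adapter->algo_data = &foo_aux_data;
	ret = i2c_dp_aux_add_bus(adapter);	/* 0 on success, -ERRNO on failure */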
+
+/* Helpers for DP link training */
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+       return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+                            int lane)
+{
+       int i = DP_LANE0_1_STATUS + (lane >> 1);
+       int s = (lane & 1) * 4;
+       u8 l = dp_link_status(link_status, i);
+       return (l >> s) & 0xf;
+}
+
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+                         int lane_count)
+{
+       u8 lane_align;
+       u8 lane_status;
+       int lane;
+
+       lane_align = dp_link_status(link_status,
+                                   DP_LANE_ALIGN_STATUS_UPDATED);
+       if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+               return false;
+       for (lane = 0; lane < lane_count; lane++) {
+               lane_status = dp_get_lane_status(link_status, lane);
+               if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+                       return false;
+       }
+       return true;
+}
+EXPORT_SYMBOL(drm_dp_channel_eq_ok);
+
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+                             int lane_count)
+{
+       int lane;
+       u8 lane_status;
+
+       for (lane = 0; lane < lane_count; lane++) {
+               lane_status = dp_get_lane_status(link_status, lane);
+               if ((lane_status & DP_LANE_CR_DONE) == 0)
+                       return false;
+       }
+       return true;
+}
+EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+                                    int lane)
+{
+       int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+       int s = ((lane & 1) ?
+                DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+                DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+       u8 l = dp_link_status(link_status, i);
+
+       return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
+
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+                                         int lane)
+{
+       int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+       int s = ((lane & 1) ?
+                DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+                DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+       u8 l = dp_link_status(link_status, i);
+
+       return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+               udelay(100);
+       else
+               mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
+
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+               udelay(400);
+       else
+               mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+}
+EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate)
+{
+       switch (link_rate) {
+       case 162000:
+       default:
+               return DP_LINK_BW_1_62;
+       case 270000:
+               return DP_LINK_BW_2_7;
+       case 540000:
+               return DP_LINK_BW_5_4;
+       }
+}
+EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
+
+int drm_dp_bw_code_to_link_rate(u8 link_bw)
+{
+       switch (link_bw) {
+       case DP_LINK_BW_1_62:
+       default:
+               return 162000;
+       case DP_LINK_BW_2_7:
+               return 270000;
+       case DP_LINK_BW_5_4:
+               return 540000;
+       }
+}
+EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
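Taken together these helpers cover the sink-independent half of DP link training. A rough sketch of the clock-recovery phase, where dpcd, lane_count and the foo_* accessors are assumed driver-side state:

	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 v, pe;
	int tries;

	for (tries = 0; tries < 5; tries++) {
		drm_dp_link_train_clock_recovery_delay(dpcd);
		foo_read_link_status(link_status);	/* hypothetical DPCD read */
		if (drm_dp_clock_recovery_ok(link_status, lane_count))
			break;
		v  = drm_dp_get_adjust_request_voltage(link_status, 0);
		pe = drm_dp_get_adjust_request_pre_emphasis(link_status, 0);
		foo_set_drive(v, pe);			/* hypothetical */
	}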
index fadcd44..5a3770f 100644 (file)
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
 
 static bool drm_edid_is_zero(u8 *in_edid, int length)
 {
-       int i;
-       u32 *raw_edid = (u32 *)in_edid;
+       if (memchr_inv(in_edid, 0, length))
+               return false;
 
-       for (i = 0; i < length / 4; i++)
-               if (*(raw_edid + i) != 0)
-                       return false;
        return true;
 }
 
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+/*
+ * Looks for a CEA mode matching a given drm_display_mode.
+ * Returns its CEA Video ID code, or 0 if not found.
+ */
+u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+{
+       struct drm_display_mode *cea_mode;
+       u8 mode;
+
+       for (mode = 0; mode < drm_num_cea_modes; mode++) {
+               cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
+
+               if (drm_mode_equal(to_match, cea_mode))
+                       return mode + 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(drm_match_cea_mode);
+
+
 static int
 do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 {
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
        if (len >= 12)
                connector->audio_latency[1] = db[12];
 
-       DRM_LOG_KMS("HDMI: DVI dual %d, "
+       DRM_DEBUG_KMS("HDMI: DVI dual %d, "
                    "max TMDS clock %d, "
                    "latency present %d %d, "
                    "video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
        return num_modes;
 }
 EXPORT_SYMBOL(drm_add_modes_noedid);
+
+/**
+ * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
+ * @mode: mode
+ *
+ * RETURNS:
+ * The VIC number, 0 in case it's not a CEA-861 mode.
+ */
+uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+{
+       uint8_t i;
+
+       for (i = 0; i < drm_num_cea_modes; i++)
+               if (drm_mode_equal(mode, &edid_cea_modes[i]))
+                       return i + 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_mode_cea_vic);
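A sketch of the expected consumer: an HDMI encoder filling in the AVI infoframe video code (the frame structure here is hypothetical):

	u8 vic = drm_mode_cea_vic(adjusted_mode);

	if (vic)
		avi_frame.video_code = vic;	/* 0 means not a CEA-861 mode */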
index 4d58d7e..954d175 100644 (file)
@@ -27,6 +27,8 @@
  *      Dave Airlie <airlied@linux.ie>
  *      Jesse Barnes <jesse.barnes@intel.com>
  */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
 #include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
 
 static LIST_HEAD(kernel_fb_helper_list);
 
+/**
+ * DOC: fbdev helpers
+ *
+ * The fb helper functions are useful for providing an fbdev on top of a drm
+ * kernel mode setting driver. They can be used mostly independently from the
+ * crtc helper functions used by many drivers to implement the kernel mode
+ * setting interfaces.
+ */
+
 /* simple single crtc case helper function */
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
                        if (mode->force) {
                                const char *s;
                                switch (mode->force) {
-                               case DRM_FORCE_OFF: s = "OFF"; break;
-                               case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
+                               case DRM_FORCE_OFF:
+                                       s = "OFF";
+                                       break;
+                               case DRM_FORCE_ON_DIGITAL:
+                                       s = "ON - dig";
+                                       break;
                                default:
-                               case DRM_FORCE_ON: s = "ON"; break;
+                               case DRM_FORCE_ON:
+                                       s = "ON";
+                                       break;
                                }
 
                                DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
        if (panic_timeout < 0)
                return 0;
 
-       printk(KERN_ERR "panic occurred, switching back to text console\n");
+       pr_err("panic occurred, switching back to text console\n");
        return drm_fb_helper_force_kernel_mode();
 }
 EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
                for (j = 0; j < fb_helper->connector_count; j++) {
                        connector = fb_helper->connector_info[j]->connector;
                        connector->funcs->dpms(connector, dpms_mode);
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                dev->mode_config.dpms_property, dpms_mode);
                }
        }
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
        if (!list_empty(&fb_helper->kernel_fb_list)) {
                list_del(&fb_helper->kernel_fb_list);
                if (list_empty(&kernel_fb_helper_list)) {
-                       printk(KERN_INFO "drm: unregistered panic notifier\n");
+                       pr_info("drm: unregistered panic notifier\n");
                        atomic_notifier_chain_unregister(&panic_notifier_list,
                                                         &paniced);
                        unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
 
        /* if driver picks 8 or 16 by default use that
           for both depth/bpp */
-       if (preferred_bpp != sizes.surface_bpp) {
+       if (preferred_bpp != sizes.surface_bpp)
                sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
-       }
+
        /* first up get a count of crtcs now in use and new min/maxes width/heights */
        for (i = 0; i < fb_helper->connector_count; i++) {
                struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        info = fb_helper->fbdev;
 
        /* set the fb pointer */
-       for (i = 0; i < fb_helper->crtc_count; i++) {
+       for (i = 0; i < fb_helper->crtc_count; i++)
                fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
-       }
 
        if (new_fb) {
                info->var.pixclock = 0;
-               if (register_framebuffer(info) < 0) {
+               if (register_framebuffer(info) < 0)
                        return -EINVAL;
-               }
 
-               printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-                      info->fix.id);
+               dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+                               info->node, info->fix.id);
 
        } else {
                drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        /* Switch back to kernel console on panic */
        /* multi card linked list maybe */
        if (list_empty(&kernel_fb_helper_list)) {
-               printk(KERN_INFO "drm: registered panic notifier\n");
+               dev_info(fb_helper->dev->dev, "registered panic notifier\n");
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &paniced);
                register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
 {
        bool enable;
 
-       if (strict) {
+       if (strict)
                enable = connector->status == connector_status_connected;
-       } else {
+       else
                enable = connector->status != connector_status_disconnected;
-       }
+
        return enable;
 }
 
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        for (c = 0; c < fb_helper->crtc_count; c++) {
                crtc = &fb_helper->crtc_info[c];
 
-               if ((encoder->possible_crtcs & (1 << c)) == 0) {
+               if ((encoder->possible_crtcs & (1 << c)) == 0)
                        continue;
-               }
 
                for (o = 0; o < n; o++)
                        if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
                        sizeof(struct drm_display_mode *), GFP_KERNEL);
        enabled = kcalloc(dev->mode_config.num_connector,
                          sizeof(bool), GFP_KERNEL);
+       if (!crtcs || !modes || !enabled) {
+               DRM_ERROR("Memory allocation failed\n");
+               goto out;
+       }
+
 
        drm_enable_connectors(fb_helper, enabled);
 
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
                }
        }
 
+out:
        kfree(crtcs);
        kfree(modes);
        kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
 
 /**
  * drm_helper_initial_config - setup a sane initial connector configuration
- * @dev: DRM device
+ * @fb_helper: fb_helper device struct
+ * @bpp_sel: bpp value to use for the framebuffer configuration
  *
  * LOCKING:
- * Called at init time, must take mode config lock.
+ * Called at init time by the driver to set up the @fb_helper initial
+ * configuration, must take the mode config lock.
  *
- * Scan the CRTCs and connectors and try to put together an initial setup.
+ * Scans the CRTCs and connectors and tries to put together an initial setup.
  * At the moment, this is a cloned configuration across all heads with
  * a new framebuffer object as the backing store.
  *
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
        /*
         * we shouldn't end up with no modes here.
         */
-       if (count == 0) {
-               printk(KERN_INFO "No connectors reported connected with modes\n");
-       }
+       if (count == 0)
+               dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
+
        drm_setup_crtcs(fb_helper);
 
        return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
 /**
  * drm_fb_helper_hotplug_event - respond to a hotplug notification by
- *                               probing all the outputs attached to the fb.
+ *                               probing all the outputs attached to the fb
  * @fb_helper: the drm_fb_helper
  *
  * LOCKING:
index c3745c4..8025454 100644
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
        hashed_key = hash_long(key, ht->order);
        DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
        h_list = &ht->table[hashed_key];
-       hlist_for_each(list, h_list) {
-               entry = hlist_entry(list, struct drm_hash_item, head);
+       hlist_for_each_entry(entry, list, h_list, head)
                DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
-       }
 }
 
 static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
 
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
-       hlist_for_each(list, h_list) {
-               entry = hlist_entry(list, struct drm_hash_item, head);
+       hlist_for_each_entry(entry, list, h_list, head) {
                if (entry->key == key)
                        return list;
                if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
        return NULL;
 }
 
+static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
+                                             unsigned long key)
+{
+       struct drm_hash_item *entry;
+       struct hlist_head *h_list;
+       struct hlist_node *list;
+       unsigned int hashed_key;
+
+       hashed_key = hash_long(key, ht->order);
+       h_list = &ht->table[hashed_key];
+       hlist_for_each_entry_rcu(entry, list, h_list, head) {
+               if (entry->key == key)
+                       return list;
+               if (entry->key > key)
+                       break;
+       }
+       return NULL;
+}
 
 int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
        hashed_key = hash_long(key, ht->order);
        h_list = &ht->table[hashed_key];
        parent = NULL;
-       hlist_for_each(list, h_list) {
-               entry = hlist_entry(list, struct drm_hash_item, head);
+       hlist_for_each_entry(entry, list, h_list, head) {
                if (entry->key == key)
                        return -EINVAL;
                if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
                parent = list;
        }
        if (parent) {
-               hlist_add_after(parent, &item->head);
+               hlist_add_after_rcu(parent, &item->head);
        } else {
-               hlist_add_head(&item->head, h_list);
+               hlist_add_head_rcu(&item->head, h_list);
        }
        return 0;
 }
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 {
        struct hlist_node *list;
 
-       list = drm_ht_find_key(ht, key);
+       list = drm_ht_find_key_rcu(ht, key);
        if (!list)
                return -EINVAL;
 
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 
        list = drm_ht_find_key(ht, key);
        if (list) {
-               hlist_del_init(list);
+               hlist_del_init_rcu(list);
                return 0;
        }
        return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 
 int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 {
-       hlist_del_init(&item->head);
+       hlist_del_init_rcu(&item->head);
        return 0;
 }
 EXPORT_SYMBOL(drm_ht_remove_item);
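With the find path switched to the _rcu list primitives, lookups may now run under rcu_read_lock() while inserts and removals still rely on the caller's external serialization; a hedged sketch:

        /* lookup side: an RCU read-side critical section is enough */
        struct drm_hash_item *item;

        rcu_read_lock();
        if (!drm_ht_find_item(ht, key, &item))
                consume(item);          /* consume() is hypothetical */
        rcu_read_unlock();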
index 23dd975..e77bd8b 100644
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
                req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
                req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
                break;
+       case DRM_CAP_TIMESTAMP_MONOTONIC:
+               req->value = drm_timestamp_monotonic;
+               break;
        default:
                return -EINVAL;
        }
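Userspace can probe the new capability with the usual get-cap call; a minimal sketch using libdrm's drmGetCap() on an already-open DRM fd:

        /* illustrative userspace probe for monotonic event timestamps */
        uint64_t cap = 0;

        if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) == 0 && cap)
                printf("vblank/flip timestamps use CLOCK_MONOTONIC\n");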
index 3a3d0ce..19c01ca 100644
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        s64 diff_ns;
        int vblrc;
        struct timeval tvblank;
+       int count = DRM_TIMESTAMP_MAXRETRIES;
 
        /* Prevent vblank irq processing while disabling vblank irqs,
         * so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        do {
                dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
                vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-       } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+       } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+
+       if (!count)
+               vblrc = 0;
 
        /* Compute time difference to stored timestamp of last vblank
         * as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                                          unsigned flags,
                                          struct drm_crtc *refcrtc)
 {
-       struct timeval stime, raw_time;
+       ktime_t stime, etime, mono_time_offset;
+       struct timeval tv_etime;
        struct drm_display_mode *mode;
        int vbl_status, vtotal, vdisplay;
        int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                preempt_disable();
 
                /* Get system timestamp before query. */
-               do_gettimeofday(&stime);
+               stime = ktime_get();
 
                /* Get vertical and horizontal scanout pos. vpos, hpos. */
                vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
 
                /* Get system timestamp after query. */
-               do_gettimeofday(&raw_time);
+               etime = ktime_get();
+               if (!drm_timestamp_monotonic)
+                       mono_time_offset = ktime_get_monotonic_offset();
 
                preempt_enable();
 
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                        return -EIO;
                }
 
-               duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+               duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
 
                /* Accept result with <  max_error nsecs timing uncertainty. */
                if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
                vbl_status |= 0x8;
        }
 
+       if (!drm_timestamp_monotonic)
+               etime = ktime_sub(etime, mono_time_offset);
+
+       /* save this only for debugging purposes */
+       tv_etime = ktime_to_timeval(etime);
        /* Subtract time delta from raw timestamp to get final
         * vblank_time timestamp for end of vblank.
         */
-       *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+       etime = ktime_sub_ns(etime, delta_ns);
+       *vblank_time = ktime_to_timeval(etime);
 
        DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
                  crtc, (int)vbl_status, hpos, vpos,
-                 (long)raw_time.tv_sec, (long)raw_time.tv_usec,
+                 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
                  (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
                  (int)duration_ns/1000, i);
 
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
 
+static struct timeval get_drm_timestamp(void)
+{
+       ktime_t now;
+
+       now = ktime_get();
+       if (!drm_timestamp_monotonic)
+               now = ktime_sub(now, ktime_get_monotonic_offset());
+
+       return ktime_to_timeval(now);
+}
+
 /**
  * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
  * vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
        }
 
        /* GPU high precision timestamp query unsupported or failed.
-        * Return gettimeofday timestamp as best estimate.
+        * Return current monotonic/gettimeofday timestamp as best estimate.
         */
-       do_gettimeofday(tvblank);
+       *tvblank = get_drm_timestamp();
 
        return 0;
 }
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_vblank_count_and_time);
 
+static void send_vblank_event(struct drm_device *dev,
+               struct drm_pending_vblank_event *e,
+               unsigned long seq, struct timeval *now)
+{
+       WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
+       e->event.sequence = seq;
+       e->event.tv_sec = now->tv_sec;
+       e->event.tv_usec = now->tv_usec;
+
+       list_add_tail(&e->base.link,
+                     &e->base.file_priv->event_list);
+       wake_up_interruptible(&e->base.file_priv->event_wait);
+       trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+                                        e->event.sequence);
+}
+
+/**
+ * drm_send_vblank_event - helper to send vblank event after pageflip
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ */
+void drm_send_vblank_event(struct drm_device *dev, int crtc,
+               struct drm_pending_vblank_event *e)
+{
+       struct timeval now;
+       unsigned int seq;
+       if (crtc >= 0) {
+               seq = drm_vblank_count_and_time(dev, crtc, &now);
+       } else {
+               seq = 0;
+
+               now = get_drm_timestamp();
+       }
+       send_vblank_event(dev, e, seq, &now);
+}
+EXPORT_SYMBOL(drm_send_vblank_event);
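The intended call site is a driver's page-flip completion path, which already holds dev->event_lock; a hedged sketch with driver-local names:

        /* illustrative flip-done handler; 'work' is a driver-local struct */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (work->event)
                drm_send_vblank_event(dev, pipe, work->event);
        spin_unlock_irqrestore(&dev->event_lock, flags);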
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+/**
+ * drm_vblank_off - disable vblank events on a CRTC
+ * @dev: DRM device
+ * @crtc: CRTC in question
+ *
+ * Caller must hold event lock.
+ */
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
        struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
        /* Send any queued vblank events, lest the natives grow disquiet */
        seq = drm_vblank_count_and_time(dev, crtc, &now);
+
+       spin_lock(&dev->event_lock);
        list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
                if (e->pipe != crtc)
                        continue;
                DRM_DEBUG("Sending premature vblank event on disable: \
                          wanted %d, current %d\n",
                          e->event.sequence, seq);
-
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
+               list_del(&e->base.link);
                drm_vblank_put(dev, e->pipe);
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-                                                e->event.sequence);
+               send_vblank_event(dev, e, seq, &now);
        }
+       spin_unlock(&dev->event_lock);
 
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 
        e->event.sequence = vblwait->request.sequence;
        if ((seq - vblwait->request.sequence) <= (1 << 23)) {
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
                drm_vblank_put(dev, pipe);
-               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
+               send_vblank_event(dev, e, seq, &now);
                vblwait->reply.sequence = seq;
-               trace_drm_vblank_event_delivered(current->pid, pipe,
-                                                vblwait->request.sequence);
        } else {
                /* drm_handle_vblank_events will call drm_vblank_put */
                list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
                DRM_DEBUG("vblank event on %d, current %d\n",
                          e->event.sequence, seq);
 
-               e->event.sequence = seq;
-               e->event.tv_sec = now.tv_sec;
-               e->event.tv_usec = now.tv_usec;
+               list_del(&e->base.link);
                drm_vblank_put(dev, e->pipe);
-               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
-                                                e->event.sequence);
+               send_vblank_event(dev, e, seq, &now);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
index 59450f3..d8da30e 100644
@@ -46,7 +46,7 @@
  *
  * Describe @mode using DRM_DEBUG.
  */
-void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
+void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
 {
        DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
                        "0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
  * RETURNS:
  * @mode->hdisplay
  */
-int drm_mode_width(struct drm_display_mode *mode)
+int drm_mode_width(const struct drm_display_mode *mode)
 {
        return mode->hdisplay;
 
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
  * RETURNS:
  * @mode->vdisplay
  */
-int drm_mode_height(struct drm_display_mode *mode)
+int drm_mode_height(const struct drm_display_mode *mode)
 {
        return mode->vdisplay;
 }
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2)
+bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
 {
        /* do clock check convert to PICOS so fb modes get matched
         * the same */
index ba33144..754bc96 100644
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
 {
        struct pci_dev *root;
        int pos;
-       u32 lnkcap, lnkcap2;
+       u32 lnkcap = 0, lnkcap2 = 0;
 
        *mask = 0;
        if (!dev->pdev)
index c236fd2..200e104 100644
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
 EXPORT_SYMBOL(drm_timestamp_precision);
 
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
 module_param_named(debug, drm_debug, int, 0600);
 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 struct idr drm_minors_idr;
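For comparison against the old behaviour, the parameter can be flipped at load time, e.g. with drm.timestamp_monotonic=0 on the kernel command line or the equivalent modprobe option; no code change is needed on either side.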
 
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
        if (!file_priv->master)
                return -EINVAL;
 
-       if (!file_priv->minor->master &&
-           file_priv->minor->master != file_priv->master) {
-               mutex_lock(&dev->struct_mutex);
-               file_priv->minor->master = drm_master_get(file_priv->master);
-               file_priv->is_master = 1;
-               if (dev->driver->master_set) {
-                       ret = dev->driver->master_set(dev, file_priv, false);
-                       if (unlikely(ret != 0)) {
-                               file_priv->is_master = 0;
-                               drm_master_put(&file_priv->minor->master);
-                       }
+       if (file_priv->minor->master)
+               return -EINVAL;
+
+       mutex_lock(&dev->struct_mutex);
+       file_priv->minor->master = drm_master_get(file_priv->master);
+       file_priv->is_master = 1;
+       if (dev->driver->master_set) {
+               ret = dev->driver->master_set(dev, file_priv, false);
+               if (unlikely(ret != 0)) {
+                       file_priv->is_master = 0;
+                       drm_master_put(&file_priv->minor->master);
                }
-               mutex_unlock(&dev->struct_mutex);
        }
+       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
        drm_put_minor(&dev->primary);
 
        list_del(&dev->driver_item);
-       if (dev->devname) {
-               kfree(dev->devname);
-               dev->devname = NULL;
-       }
+       kfree(dev->devname);
        kfree(dev);
 }
 EXPORT_SYMBOL(drm_put_dev);
index 05cd8fe..0229665 100644
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
        uint64_t dpms_status;
        int ret;
 
-       ret = drm_connector_property_get_value(connector,
+       ret = drm_object_property_get_value(&connector->base,
                                            dev->mode_config.dpms_property,
                                            &dpms_status);
        if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
                return 0;
        }
 
-       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
        if (ret)
                return 0;
 
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
                return 0;
        }
 
-       ret = drm_connector_property_get_value(connector, prop, &subconnector);
+       ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
        if (ret)
                return 0;
 
index fc345d4..1d1f1e5 100644
@@ -10,6 +10,12 @@ config DRM_EXYNOS
          Choose this option if you have a Samsung SoC EXYNOS chipset.
          If M is selected the module will be called exynosdrm.
 
+config DRM_EXYNOS_IOMMU
+       bool "EXYNOS DRM IOMMU Support"
+       depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+       help
+         Choose this option if you want to use IOMMU feature for DRM.
+
 config DRM_EXYNOS_DMABUF
        bool "EXYNOS DRM DMABUF"
        depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
        depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
        help
          Choose this option if you want to use Exynos G2D for DRM.
+
+config DRM_EXYNOS_IPP
+       bool "Exynos DRM IPP"
+       depends on DRM_EXYNOS
+       help
+         Choose this option if you want to use IPP feature for DRM.
+
+config DRM_EXYNOS_FIMC
+       bool "Exynos DRM FIMC"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos FIMC for DRM.
+
+config DRM_EXYNOS_ROTATOR
+       bool "Exynos DRM Rotator"
+       depends on DRM_EXYNOS_IPP
+       help
+         Choose this option if you want to use Exynos Rotator for DRM.
+
+config DRM_EXYNOS_GSC
+       bool "Exynos DRM GSC"
+       depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+       help
+         Choose this option if you want to use Exynos GSC for DRM.
index eb651ca..639b49e 100644
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
                exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
                exynos_drm_plane.o
 
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)    += exynos_drm_fimd.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)    += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)   += exynos_hdmi.o exynos_mixer.o \
                                           exynos_drm_hdmi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)    += exynos_drm_vidi.o
 exynosdrm-$(CONFIG_DRM_EXYNOS_G2D)     += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP)     += exynos_drm_ipp.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC)    += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC)     += exynos_drm_gsc.o
 
 obj-$(CONFIG_DRM_EXYNOS)               += exynosdrm.o
index 37e6ec7..bef43e0 100644
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
        { },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiddc_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 struct i2c_driver ddc_driver = {
        .driver = {
                .name = "exynos-hdmiddc",
                .owner = THIS_MODULE,
-               .of_match_table = hdmiddc_match_types,
+               .of_match_table = of_match_ptr(hdmiddc_match_types),
        },
        .id_table       = ddc_idtable,
        .probe          = s5p_ddc_probe,
index 118c117..9601bad 100644
 static int lowlevel_buffer_allocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
 {
-       dma_addr_t start_addr;
-       unsigned int npages, i = 0;
-       struct scatterlist *sgl;
        int ret = 0;
+       enum dma_attr attr;
+       unsigned int nr_pages;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_DEBUG_KMS("not support allocation type.\n");
-               return -EINVAL;
-       }
-
        if (buf->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }
 
-       if (buf->size >= SZ_1M) {
-               npages = buf->size >> SECTION_SHIFT;
-               buf->page_size = SECTION_SIZE;
-       } else if (buf->size >= SZ_64K) {
-               npages = buf->size >> 16;
-               buf->page_size = SZ_64K;
-       } else {
-               npages = buf->size >> PAGE_SHIFT;
-               buf->page_size = PAGE_SIZE;
-       }
+       init_dma_attrs(&buf->dma_attrs);
 
-       buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!buf->sgt) {
-               DRM_ERROR("failed to allocate sg table.\n");
-               return -ENOMEM;
-       }
+       /*
+        * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
+        * region will be allocated; otherwise the allocation is made
+        * as physically contiguous as possible.
+        */
+       if (flags & EXYNOS_BO_CONTIG)
+               dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
 
-       ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-       if (ret < 0) {
-               DRM_ERROR("failed to initialize sg table.\n");
-               kfree(buf->sgt);
-               buf->sgt = NULL;
-               return -ENOMEM;
-       }
+       /*
+        * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
+        * mapping; otherwise use a cacheable mapping.
+        */
+       if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
+               attr = DMA_ATTR_WRITE_COMBINE;
+       else
+               attr = DMA_ATTR_NON_CONSISTENT;
 
-       buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
-                       &buf->dma_addr, GFP_KERNEL);
-       if (!buf->kvaddr) {
-               DRM_ERROR("failed to allocate buffer.\n");
-               ret = -ENOMEM;
-               goto err1;
-       }
+       dma_set_attr(attr, &buf->dma_attrs);
+       dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
 
-       buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+       buf->pages = dma_alloc_attrs(dev->dev, buf->size,
+                       &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
        if (!buf->pages) {
-               DRM_ERROR("failed to allocate pages.\n");
-               ret = -ENOMEM;
-               goto err2;
+               DRM_ERROR("failed to allocate buffer.\n");
+               return -ENOMEM;
        }
 
-       sgl = buf->sgt->sgl;
-       start_addr = buf->dma_addr;
-
-       while (i < npages) {
-               buf->pages[i] = phys_to_page(start_addr);
-               sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
-               sg_dma_address(sgl) = start_addr;
-               start_addr += buf->page_size;
-               sgl = sg_next(sgl);
-               i++;
+       nr_pages = buf->size >> PAGE_SHIFT;
+       buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
+       if (!buf->sgt) {
+               DRM_ERROR("failed to get sg table.\n");
+               ret = -ENOMEM;
+               goto err_free_attrs;
        }
 
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
        return ret;
-err2:
-       dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-                       (dma_addr_t)buf->dma_addr);
+
+err_free_attrs:
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
+                       (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
-err1:
-       sg_free_table(buf->sgt);
-       kfree(buf->sgt);
-       buf->sgt = NULL;
 
        return ret;
 }
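Condensed, the allocation now follows the generic DMA attributes pattern of this kernel generation; a minimal sketch with hypothetical locals:

        /* illustrative dma_*_attrs round trip (3.7/3.8-era struct dma_attrs API) */
        struct dma_attrs attrs;
        dma_addr_t dma_addr;
        void *pages;

        init_dma_attrs(&attrs);
        dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);        /* EXYNOS_BO_CONTIG case */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);       /* no kernel vaddr kept */

        pages = dma_alloc_attrs(dev, size, &dma_addr, GFP_KERNEL, &attrs);
        /* use dma_addr, build an sg table, then release: */
        dma_free_attrs(dev, size, pages, dma_addr, &attrs);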
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
 {
        DRM_DEBUG_KMS("%s.\n", __FILE__);
 
-       /*
-        * release only physically continuous memory and
-        * non-continuous memory would be released by exynos
-        * gem framework.
-        */
-       if (IS_NONCONTIG_BUFFER(flags)) {
-               DRM_DEBUG_KMS("not support allocation type.\n");
-               return;
-       }
-
        if (!buf->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }
 
-       DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
-                       (unsigned long)buf->kvaddr,
+       DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
        kfree(buf->sgt);
        buf->sgt = NULL;
 
-       kfree(buf->pages);
-       buf->pages = NULL;
-
-       dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
-                               (dma_addr_t)buf->dma_addr);
+       dma_free_attrs(dev->dev, buf->size, buf->pages,
+                               (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
        buf->dma_addr = (dma_addr_t)NULL;
 }
 
index 3388e4e..25cf162 100644
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
 void exynos_drm_fini_buf(struct drm_device *dev,
                                struct exynos_drm_gem_buf *buffer);
 
-/* allocate physical memory region and setup sgt and pages. */
+/* allocate physical memory region and setup sgt. */
 int exynos_drm_alloc_buf(struct drm_device *dev,
                                struct exynos_drm_gem_buf *buf,
                                unsigned int flags);
 
-/* release physical memory region, sgt and pages. */
+/* release physical memory region, and sgt. */
 void exynos_drm_free_buf(struct drm_device *dev,
                                unsigned int flags,
                                struct exynos_drm_gem_buf *buffer);
index fce245f..2efa4b0 100644
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
                        goto out;
                }
 
+               spin_lock_irq(&dev->event_lock);
                list_add_tail(&event->base.link,
                                &dev_priv->pageflip_event_list);
+               spin_unlock_irq(&dev->event_lock);
 
                crtc->fb = fb;
                ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
                                                    NULL);
                if (ret) {
                        crtc->fb = old_fb;
+
+                       spin_lock_irq(&dev->event_lock);
                        drm_vblank_put(dev, exynos_crtc->pipe);
                        list_del(&event->base.link);
+                       spin_unlock_irq(&dev->event_lock);
 
                        goto out;
                }
index fae1f2e..61d5a84 100644
 
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
-               unsigned int page_size)
+struct exynos_drm_dmabuf_attachment {
+       struct sg_table sgt;
+       enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+                                       struct device *dev,
+                                       struct dma_buf_attachment *attach)
 {
-       struct sg_table *sgt = NULL;
-       struct scatterlist *sgl;
-       int i, ret;
+       struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-       sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-       if (!sgt)
-               goto out;
+       exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+       if (!exynos_attach)
+               return -ENOMEM;
 
-       ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
-       if (ret)
-               goto err_free_sgt;
+       exynos_attach->dir = DMA_NONE;
+       attach->priv = exynos_attach;
 
-       if (page_size < PAGE_SIZE)
-               page_size = PAGE_SIZE;
+       return 0;
+}
 
-       for_each_sg(sgt->sgl, sgl, nr_pages, i)
-               sg_set_page(sgl, pages[i], page_size, 0);
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+                                       struct dma_buf_attachment *attach)
+{
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+       struct sg_table *sgt;
 
-       return sgt;
+       if (!exynos_attach)
+               return;
 
-err_free_sgt:
-       kfree(sgt);
-       sgt = NULL;
-out:
-       return NULL;
+       sgt = &exynos_attach->sgt;
+
+       if (exynos_attach->dir != DMA_NONE)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                               exynos_attach->dir);
+
+       sg_free_table(sgt);
+       kfree(exynos_attach);
+       attach->priv = NULL;
 }
 
 static struct sg_table *
                exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
 {
+       struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
+       struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
-       unsigned int npages;
-       int nents;
+       unsigned int i;
+       int nents, ret;
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
 
-       mutex_lock(&dev->struct_mutex);
+       if (WARN_ON(dir == DMA_NONE))
+               return ERR_PTR(-EINVAL);
+
+       /* just return current sgt if already requested. */
+       if (exynos_attach->dir == dir)
+               return &exynos_attach->sgt;
+
+       /* reattaching is not allowed. */
+       if (WARN_ON(exynos_attach->dir != DMA_NONE))
+               return ERR_PTR(-EBUSY);
 
        buf = gem_obj->buffer;
+       if (!buf) {
+               DRM_ERROR("buffer is null.\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
-       /* there should always be pages allocated. */
-       if (!buf->pages) {
-               DRM_ERROR("pages is null.\n");
-               goto err_unlock;
+       sgt = &exynos_attach->sgt;
+
+       ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+       if (ret) {
+               DRM_ERROR("failed to alloc sgt.\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       npages = buf->size / buf->page_size;
+       mutex_lock(&dev->struct_mutex);
 
-       sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
-       if (!sgt) {
-               DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+       rd = buf->sgt->sgl;
+       wr = sgt->sgl;
+       for (i = 0; i < sgt->orig_nents; ++i) {
+               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+               rd = sg_next(rd);
+               wr = sg_next(wr);
+       }
+
+       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
+       if (!nents) {
+               DRM_ERROR("failed to map sgl with iommu.\n");
+               sgt = ERR_PTR(-EIO);
                goto err_unlock;
        }
-       nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
 
-       DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
-                       npages, buf->size, buf->page_size);
+       exynos_attach->dir = dir;
+       attach->priv = exynos_attach;
+
+       DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                                struct sg_table *sgt,
                                                enum dma_data_direction dir)
 {
-       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-       sg_free_table(sgt);
-       kfree(sgt);
-       sgt = NULL;
+       /* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+       .attach                 = exynos_gem_attach_dma_buf,
+       .detach                 = exynos_gem_detach_dma_buf,
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
        .kmap                   = exynos_gem_dmabuf_kmap,
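The net effect is the standard cached-attachment pattern: the sg_table lives in the attachment, a repeated map with the same direction returns the cached table, and the real unmap is deferred to detach. From the importer's point of view, hedged and with error handling elided:

        /* illustrative importer flow against the new ops */
        attach = dma_buf_attach(dmabuf, dev);                      /* allocates priv */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);   /* maps once */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);   /* cached table */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);  /* no-op here */
        dma_buf_detach(dmabuf, attach);                            /* unmap + free */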
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
-       struct page *page;
        int ret;
 
        DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                goto err_unmap_attach;
        }
 
-       buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
-       if (!buffer->pages) {
-               DRM_ERROR("failed to allocate pages.\n");
-               ret = -ENOMEM;
-               goto err_free_buffer;
-       }
-
        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
-               goto err_free_pages;
+               goto err_free_buffer;
        }
 
        sgl = sgt->sgl;
 
-       if (sgt->nents == 1) {
-               buffer->dma_addr = sg_dma_address(sgt->sgl);
-               buffer->size = sg_dma_len(sgt->sgl);
+       buffer->size = dma_buf->size;
+       buffer->dma_addr = sg_dma_address(sgl);
 
+       if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
-               unsigned int i = 0;
-
-               buffer->dma_addr = sg_dma_address(sgl);
-               while (i < sgt->nents) {
-                       buffer->pages[i] = sg_page(sgl);
-                       buffer->size += sg_dma_len(sgl);
-                       sgl = sg_next(sgl);
-                       i++;
-               }
-
+               /*
+                * this case could be either CONTIG or NONCONTIG, but for
+                * now we assume NONCONTIG.
+                * TODO: find a way for the exporter to notify the importer
+                * of its own buffer type.
+                */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }
 
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
        return &exynos_gem_obj->base;
 
-err_free_pages:
-       kfree(buffer->pages);
-       buffer->pages = NULL;
 err_free_buffer:
        kfree(buffer);
        buffer = NULL;
index 1de7baa..e0a8e80 100644
@@ -40,6 +40,8 @@
 #include "exynos_drm_vidi.h"
 #include "exynos_drm_dmabuf.h"
 #include "exynos_drm_g2d.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
 
 #define DRIVER_NAME    "exynos"
 #define DRIVER_DESC    "Samsung SoC DRM"
@@ -49,6 +51,9 @@
 
 #define VBLANK_OFF_DELAY       50000
 
+/* platform device pointer for exynos drm device. */
+static struct platform_device *exynos_drm_pdev;
+
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
        struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        INIT_LIST_HEAD(&private->pageflip_event_list);
        dev->dev_private = (void *)private;
 
+       /*
+        * create a mapping to manage the iommu table and store a pointer
+        * to the iommu mapping structure in iommu_mapping of the private
+        * data. this iommu_mapping can also be used to check whether
+        * iommu is supported or not.
+        */
+       ret = drm_create_iommu_mapping(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to create iommu mapping.\n");
+               goto err_crtc;
+       }
+
        drm_mode_config_init(dev);
 
        /* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        for (nr = 0; nr < MAX_CRTC; nr++) {
                ret = exynos_drm_crtc_create(dev, nr);
                if (ret)
-                       goto err_crtc;
+                       goto err_release_iommu_mapping;
        }
 
        for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 
                plane = exynos_plane_init(dev, possible_crtcs, false);
                if (!plane)
-                       goto err_crtc;
+                       goto err_release_iommu_mapping;
        }
 
        ret = drm_vblank_init(dev, MAX_CRTC);
        if (ret)
-               goto err_crtc;
+               goto err_release_iommu_mapping;
 
        /*
         * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
        exynos_drm_device_unregister(dev);
 err_vblank:
        drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+       drm_release_iommu_mapping(dev);
 err_crtc:
        drm_mode_config_cleanup(dev);
        kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
        drm_vblank_cleanup(dev);
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
+
+       drm_release_iommu_mapping(dev);
        kfree(dev->dev_private);
 
        dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
                        exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
        DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
                        exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+                       exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+                       exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
+                       exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
+                       exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
 };
 
 static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
 
        return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
        ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
        if (ret < 0)
                goto out_common_hdmi;
+
+       ret = exynos_platform_device_hdmi_register();
+       if (ret < 0)
+               goto out_common_hdmi_dev;
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
                goto out_g2d;
 #endif
 
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       ret = platform_driver_register(&fimc_driver);
+       if (ret < 0)
+               goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       ret = platform_driver_register(&rotator_driver);
+       if (ret < 0)
+               goto out_rotator;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       ret = platform_driver_register(&gsc_driver);
+       if (ret < 0)
+               goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       ret = platform_driver_register(&ipp_driver);
+       if (ret < 0)
+               goto out_ipp;
+#endif
+
        ret = platform_driver_register(&exynos_drm_platform_driver);
        if (ret < 0)
+               goto out_drm;
+
+       exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
+                               NULL, 0);
+       if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
+               ret = PTR_ERR(exynos_drm_pdev);
                goto out;
+       }
 
        return 0;
 
 out:
+       platform_driver_unregister(&exynos_drm_platform_driver);
+
+out_drm:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+out_rotator:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 out_g2d:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_VIDI
-out_vidi:
        platform_driver_unregister(&vidi_driver);
+out_vidi:
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
+out_common_hdmi_dev:
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
 out_common_hdmi:
        platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
 {
        DRM_DEBUG_DRIVER("%s\n", __FILE__);
 
+       platform_device_unregister(exynos_drm_pdev);
+
        platform_driver_unregister(&exynos_drm_platform_driver);
 
+#ifdef CONFIG_DRM_EXYNOS_IPP
+       platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+       platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_ROTATOR
+       platform_driver_unregister(&rotator_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+       platform_driver_unregister(&fimc_driver);
+#endif
+
 #ifdef CONFIG_DRM_EXYNOS_G2D
        platform_driver_unregister(&g2d_driver);
 #endif
 
 #ifdef CONFIG_DRM_EXYNOS_HDMI
+       exynos_platform_device_hdmi_unregister();
        platform_driver_unregister(&exynos_drm_common_hdmi_driver);
        platform_driver_unregister(&mixer_driver);
        platform_driver_unregister(&hdmi_driver);
index a342310..f5a9774 100644
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
  * @commit: apply hardware specific overlay data to registers.
  * @enable: enable hardware specific overlay.
  * @disable: disable hardware specific overlay.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- *     hardware overlay is disabled.
  */
 struct exynos_drm_overlay_ops {
        void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
        void (*commit)(struct device *subdrv_dev, int zpos);
        void (*enable)(struct device *subdrv_dev, int zpos);
        void (*disable)(struct device *subdrv_dev, int zpos);
-       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
  * @pixel_format: fourcc pixel format of this overlay
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *           allocated for a overlay.
- * @vaddr: array of virtual memory addresss to this overlay.
  * @zpos: order of overlay layer(z position).
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
        unsigned int pitch;
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
-       void __iomem *vaddr[MAX_FB_BUFFER];
        int zpos;
 
        bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ * @wait_for_vblank: wait for vblank interrupt to make sure that
+ *     hardware overlay is updated.
  */
 struct exynos_drm_manager_ops {
        void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
        void (*commit)(struct device *subdrv_dev);
        int (*enable_vblank)(struct device *subdrv_dev);
        void (*disable_vblank)(struct device *subdrv_dev);
+       void (*wait_for_vblank)(struct device *subdrv_dev);
 };
 
 /*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
        struct device           *dev;
        struct list_head        inuse_cmdlist;
        struct list_head        event_list;
-       struct list_head        gem_list;
-       unsigned int            gem_nr;
+       struct list_head        userptr_list;
+};
+
+struct exynos_drm_ipp_private {
+       struct device   *dev;
+       struct list_head        event_list;
 };
 
 struct drm_exynos_file_private {
        struct exynos_drm_g2d_private   *g2d_priv;
+       struct exynos_drm_ipp_private   *ipp_priv;
 };
 
 /*
  * Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ *     with iommu, the device address space starts from this address,
+ *     otherwise from the default one.
+ * @da_space_size: size of the device address space.
+ *     if 0, a default value is used.
+ * @da_space_order: order of the device address space.
  */
 struct exynos_drm_private {
        struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
        struct drm_crtc *crtc[MAX_CRTC];
        struct drm_property *plane_zpos_property;
        struct drm_property *crtc_mode_property;
+
+       unsigned long da_start;
+       unsigned long da_space_size;
+       unsigned long da_space_order;
 };
 
 /*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
+/*
+ * this function registers the exynos drm hdmi platform device. It ensures
+ * that only one instance of the device is created.
+ */
+extern int exynos_platform_device_hdmi_register(void);
+
+/*
+ * this function unregisters the exynos drm hdmi platform device if it exists.
+ */
+void exynos_platform_device_hdmi_unregister(void);
+
 extern struct platform_driver fimd_driver;
 extern struct platform_driver hdmi_driver;
 extern struct platform_driver mixer_driver;
 extern struct platform_driver exynos_drm_common_hdmi_driver;
 extern struct platform_driver vidi_driver;
 extern struct platform_driver g2d_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver rotator_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
 #endif
index f2df06c..3014852 100644
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
        exynos_encoder->dpms = DRM_MODE_DPMS_ON;
 }
 
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
+{
+       struct exynos_drm_encoder *exynos_encoder;
+       struct exynos_drm_manager_ops *ops;
+       struct drm_device *dev = fb->dev;
+       struct drm_encoder *encoder;
+
+       /*
+        * make sure that overlay data are updated to real hardware
+        * for all encoders.
+        */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               exynos_encoder = to_exynos_encoder(encoder);
+               ops = exynos_encoder->manager->ops;
+
+               /*
+                * wait for vblank interrupt
+                * - this makes sure that overlay data are updated to
+                *      real hardware.
+                */
+               if (ops->wait_for_vblank)
+                       ops->wait_for_vblank(exynos_encoder->manager->dev);
+       }
+}
+
+
 static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
 {
        struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
 
        if (overlay_ops && overlay_ops->disable)
                overlay_ops->disable(manager->dev, zpos);
-
-       /*
-        * wait for vblank interrupt
-        * - this makes sure that hardware overlay is disabled to avoid
-        * for the dma accesses to memory after gem buffer was released
-        * because the setting for disabling the overlay will be updated
-        * at vsync.
-        */
-       if (overlay_ops && overlay_ops->wait_for_vblank)
-               overlay_ops->wait_for_vblank(manager->dev);
 }
index 6470d9d..88bb25a 100644
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
 
 #endif
index 4ef4cd3..5426cc5 100644 (file)
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_encoder.h"
 
 #define to_exynos_fb(x)        container_of(x, struct exynos_drm_fb, fb)
 
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
        struct exynos_drm_gem_obj       *exynos_gem_obj[MAX_FB_BUFFER];
 };
 
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+                               struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+       unsigned int flags;
+
+       /*
+        * if the exynos drm driver supports iommu then the framebuffer can
+        * use all buffer types.
+        */
+       if (is_drm_iommu_supported(drm_dev))
+               return 0;
+
+       flags = exynos_gem_obj->flags;
+
+       /*
+        * without iommu support, physically non-contiguous memory cannot
+        * be used for the framebuffer.
+        */
+       if (IS_NONCONTIG_BUFFER(flags)) {
+               DRM_ERROR("cannot use this gem memory type for fb.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
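+
+/*
+ * Illustrative example (assumption): a gem buffer allocated with the
+ * EXYNOS_BO_NONCONTIG flag passes this check on iommu-capable hardware,
+ * while the same buffer is rejected with -EINVAL when no iommu is present.
+ */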
+
 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 {
        struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
+       /* make sure that overlay data is updated before releasing the fb. */
+       exynos_drm_encoder_complete_scanout(fb);
+
        drm_framebuffer_cleanup(fb);
 
        for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
                            struct drm_gem_object *obj)
 {
        struct exynos_drm_fb *exynos_fb;
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;
 
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+       ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
        exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
        if (!exynos_fb) {
                DRM_ERROR("failed to allocate exynos drm framebuffer\n");
                return ERR_PTR(-ENOMEM);
        }
 
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
        ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer\n");
                return ERR_PTR(ret);
        }
 
-       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
-       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-
        return &exynos_fb->fb;
 }
 
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct drm_gem_object *obj;
-       struct drm_framebuffer *fb;
        struct exynos_drm_fb *exynos_fb;
-       int i;
+       int i, ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-ENOENT);
        }
 
-       fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
-       if (IS_ERR(fb)) {
-               drm_gem_object_unreference_unlocked(obj);
-               return fb;
+       exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+       if (!exynos_fb) {
+               DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+               return ERR_PTR(-ENOMEM);
        }
 
-       exynos_fb = to_exynos_fb(fb);
+       drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+       exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
        exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
 
        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
        for (i = 1; i < exynos_fb->buf_cnt; i++) {
+               struct exynos_drm_gem_obj *exynos_gem_obj;
+
                obj = drm_gem_object_lookup(dev, file_priv,
                                mode_cmd->handles[i]);
                if (!obj) {
                        DRM_ERROR("failed to lookup gem object\n");
-                       exynos_drm_fb_destroy(fb);
+                       kfree(exynos_fb);
                        return ERR_PTR(-ENOENT);
                }
 
+               exynos_gem_obj = to_exynos_gem_obj(obj);
+
+               ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+               if (ret < 0) {
+                       DRM_ERROR("cannot use this gem memory type for fb.\n");
+                       kfree(exynos_fb);
+                       return ERR_PTR(ret);
+               }
+
                exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
        }
 
-       return fb;
+       ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+       if (ret) {
+               for (i = 0; i < exynos_fb->buf_cnt; i++) {
+                       struct exynos_drm_gem_obj *gem_obj;
+
+                       gem_obj = exynos_fb->exynos_gem_obj[i];
+                       drm_gem_object_unreference_unlocked(&gem_obj->base);
+               }
+
+               kfree(exynos_fb);
+               return ERR_PTR(ret);
+       }
+
+       return &exynos_fb->fb;
 }
 
 struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
        if (!buffer)
                return NULL;
 
-       DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                       (unsigned long)buffer->kvaddr,
-                       (unsigned long)buffer->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
 
        return buffer;
 }
index e7466c4..f433eb7 100644 (file)
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
        struct exynos_drm_gem_obj       *exynos_gem_obj;
 };
 
+static int exynos_drm_fb_mmap(struct fb_info *info,
+                       struct vm_area_struct *vma)
+{
+       struct drm_fb_helper *helper = info->par;
+       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
+       struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
+       unsigned long vm_size;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
+       vm_size = vma->vm_end - vma->vm_start;
+
+       if (vm_size > buffer->size)
+               return -EINVAL;
+
+       ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
+               buffer->dma_addr, buffer->size, &buffer->dma_attrs);
+       if (ret < 0) {
+               DRM_ERROR("failed to mmap.\n");
+               return ret;
+       }
+
+       return 0;
+}
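+
+/*
+ * Usage sketch (illustrative): userspace mmap()s the fbdev node and the
+ * request is served from the gem buffer backing the framebuffer:
+ *
+ *   fd = open("/dev/fb0", O_RDWR);
+ *   ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *
+ * len must not exceed buffer->size, otherwise the handler returns -EINVAL.
+ */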
+
 static struct fb_ops exynos_drm_fb_ops = {
        .owner          = THIS_MODULE,
+       .fb_mmap        = exynos_drm_fb_mmap,
        .fb_fillrect    = cfb_fillrect,
        .fb_copyarea    = cfb_copyarea,
        .fb_imageblit   = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
                return -EFAULT;
        }
 
+       /* map pages into kernel virtual address space. */
+       if (!buffer->kvaddr) {
+               unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
+               buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
+                                       pgprot_writecombine(PAGE_KERNEL));
+               if (!buffer->kvaddr) {
+                       DRM_ERROR("failed to map pages to kernel space.\n");
+                       return -EIO;
+               }
+       }
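+       /*
+        * Note: the vmap()ed address stays valid for the fbdev lifetime;
+        * the matching vunmap() is done in exynos_drm_fbdev_destroy().
+        */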
+
        /* buffer count to framebuffer always is 1 at booting time. */
        exynos_drm_fb_set_buf_cnt(fb, 1);
 
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
 
        dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
        fbi->screen_base = buffer->kvaddr + offset;
-       fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) +
-                               offset);
+       fbi->fix.smem_start = (unsigned long)
+                       (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
        fbi->screen_size = size;
        fbi->fix.smem_len = size;
 
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
-               goto out;
+               goto err_release_framebuffer;
        }
 
        exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        if (IS_ERR_OR_NULL(helper->fb)) {
                DRM_ERROR("failed to create drm framebuffer.\n");
                ret = PTR_ERR(helper->fb);
-               goto out;
+               goto err_destroy_gem;
        }
 
        helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
        ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
        if (ret) {
                DRM_ERROR("failed to allocate cmap.\n");
-               goto out;
+               goto err_destroy_framebuffer;
        }
 
        ret = exynos_drm_fbdev_update(helper, helper->fb);
-       if (ret < 0) {
-               fb_dealloc_cmap(&fbi->cmap);
-               goto out;
-       }
+       if (ret < 0)
+               goto err_dealloc_cmap;
+
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+
+err_dealloc_cmap:
+       fb_dealloc_cmap(&fbi->cmap);
+err_destroy_framebuffer:
+       drm_framebuffer_cleanup(helper->fb);
+err_destroy_gem:
+       exynos_drm_gem_destroy(exynos_gem_obj);
+err_release_framebuffer:
+       framebuffer_release(fbi);
 
 /*
  * if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
 static void exynos_drm_fbdev_destroy(struct drm_device *dev,
                                      struct drm_fb_helper *fb_helper)
 {
+       struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
+       struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
        struct drm_framebuffer *fb;
 
+       if (exynos_gem_obj->buffer->kvaddr)
+               vunmap(exynos_gem_obj->buffer->kvaddr);
+
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
                fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644 (file)
index 0000000..61ea242
--- /dev/null
@@ -0,0 +1,2001 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-fimc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC stands for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> FIMC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> FIMC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> FIMC H/W ----> FIMD.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size with width, height.
+ * 4. add check_prepare api for correct register setup.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define FIMC_MAX_DEVS  4
+#define FIMC_MAX_SRC   2
+#define FIMC_MAX_DST   32
+#define FIMC_SHFACTOR  10
+#define FIMC_BUF_STOP  1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ            32
+#define FIMC_WIDTH_ITU_709     1280
+#define FIMC_REFRESH_MAX       60
+#define FIMC_REFRESH_MIN       12
+#define FIMC_CROP_MAX  8192
+#define FIMC_CROP_MIN  32
+#define FIMC_SCALE_MAX 4224
+#define FIMC_SCALE_MIN 32
+
+#define get_fimc_context(dev)  platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct fimc_context, ippdrv)
+#define fimc_read(offset)              readl(ctx->regs + (offset))
+#define fimc_write(cfg, offset)        writel(cfg, ctx->regs + (offset))
+
+enum fimc_wb {
+       FIMC_WB_NONE,
+       FIMC_WB_A,
+       FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: scaler bypass mode (scaler path unused).
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+       bool    range;
+       bool    bypass;
+       bool    up_h;
+       bool    up_v;
+       u32     hratio;
+       u32     vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height with output rotation.
+ */
+struct fimc_capability {
+       /* scaler */
+       u32     in_hori;
+       u32     bypass;
+       /* output rotator */
+       u32     dst_h_wo_rot;
+       u32     dst_h_rot;
+       /* input rotator */
+       u32     rl_w_wo_rot;
+       u32     rl_h_rot;
+};
+
+/*
+ * A structure of fimc driver data.
+ *
+ * @parent_clk: name of parent clock.
+ */
+struct fimc_driverdata {
+       char    *parent_clk;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: ipp driver instance.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback A clock.
+ * @wb_b_clk: writeback B clock.
+ * @sc: scaler information.
+ * @ddata: current fimc driver data.
+ * @pol: polarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: suspended state.
+ */
+struct fimc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *sclk_fimc_clk;
+       struct clk      *fimc_clk;
+       struct clk      *wb_clk;
+       struct clk      *wb_b_clk;
+       struct fimc_scaler      sc;
+       struct fimc_driverdata  *ddata;
+       struct exynos_drm_ipp_pol       pol;
+       int     id;
+       int     irq;
+       bool    suspended;
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+       cfg = fimc_read(EXYNOS_CISRCFMT);
+       cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+       if (pattern)
+               cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* s/w reset */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= (EXYNOS_CIGCTRL_SWRST);
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* s/w reset complete */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~EXYNOS_CIGCTRL_SWRST;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       /* reset sequence */
+       fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+       u32 camblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       camblk_cfg = readl(SYSREG_CAMERA_BLK);
+       camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+       camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+       writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+               EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+               EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+               EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+               EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+               EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+       switch (wb) {
+       case FIMC_WB_A:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_B:
+               cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+                       EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+               break;
+       case FIMC_WB_NONE:
+       default:
+               cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+                       EXYNOS_CIGCTRL_SELWRITEBACK_A |
+                       EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+                       EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+               break;
+       }
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+               struct exynos_drm_ipp_pol *pol)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+               __func__, pol->inv_pclk, pol->inv_vsync);
+       DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+               __func__, pol->inv_href, pol->inv_hsync);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+                EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+       if (pol->inv_pclk)
+               cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+       if (pol->inv_vsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+       if (pol->inv_href)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+       if (pol->inv_hsync)
+               cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+       else
+               cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+               bool overflow, bool level)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+                       enable, overflow, level);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       if (enable) {
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+               cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+               if (overflow)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+               if (level)
+                       cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+       } else
+               cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg, status, flag;
+
+       status = fimc_read(EXYNOS_CISTATUS);
+       flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+               EXYNOS_CISTATUS_OVFICR;
+
+       DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+       if (status & flag) {
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               cfg = fimc_read(EXYNOS_CIWDOFST);
+               cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+                       EXYNOS_CIWDOFST_CLROVFICR);
+
+               fimc_write(cfg, EXYNOS_CIWDOFST);
+
+               dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return true;
+       }
+
+       return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+       u32 cfg;
+
+       cfg = fimc_read(EXYNOS_CISTATUS);
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+       if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+               return false;
+
+       cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+       fimc_write(cfg, EXYNOS_CISTATUS);
+
+       return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+       u32 cfg;
+       int frame_cnt, buf_id;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cfg = fimc_read(EXYNOS_CISTATUS2);
+       frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+       if (frame_cnt == 0)
+               frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+       DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+               EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+       if (frame_cnt == 0) {
+               DRM_ERROR("failed to get frame count.\n");
+               return -EIO;
+       }
+
+       buf_id = frame_cnt - 1;
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       if (enable)
+               cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+       else
+               cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+               EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+               EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+       switch (fmt) {
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+                       EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       return 0;
+}
+
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_MSCTRL);
+       cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+               break;
+       case DRM_FORMAT_YUV444:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+               break;
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV12MT:
+               cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_MSCTRL);
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg1, cfg2;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg1 = fimc_read(EXYNOS_MSCTRL);
+       cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+               EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+       cfg2 = fimc_read(EXYNOS_CITRGFMT);
+       cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+                       EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+               cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg1, EXYNOS_MSCTRL);
+       fimc_write(cfg2, EXYNOS_CITRGFMT);
+       *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_set_window(struct fimc_context *ctx,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       u32 cfg, h1, h2, v1, v2;
+
+       /* cropped image */
+       h1 = pos->x;
+       h2 = sz->hsize - pos->w - pos->x;
+       v1 = pos->y;
+       v2 = sz->vsize - pos->h - pos->y;
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+       __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+       DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+               h1, h2, v1, v2);
+
+       /*
+        * set window offset 1, 2 size
+        * check figure 43-21 in user manual
+        */
+       cfg = fimc_read(EXYNOS_CIWDOFST);
+       cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+               EXYNOS_CIWDOFST_WINVEROFST_MASK);
+       cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+               EXYNOS_CIWDOFST_WINVEROFST(v1));
+       cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+       fimc_write(cfg, EXYNOS_CIWDOFST);
+
+       cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+               EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+       fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+       return 0;
+}
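+
+/*
+ * Worked example (illustrative): cropping a 1280x720 region at (100, 50)
+ * out of a 1920x1080 source gives h1 = 100, h2 = 1920 - 1280 - 100 = 540,
+ * v1 = 50 and v2 = 1080 - 720 - 50 = 310.
+ */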
+
+static int fimc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct drm_exynos_sz img_sz = *sz;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+               __func__, swap, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+               EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+       fimc_write(cfg, EXYNOS_ORGISIZE);
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+               pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+               img_sz.hsize = sz->vsize;
+               img_sz.vsize = sz->hsize;
+       }
+
+       /* set input DMA image size */
+       cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+       cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+               EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+       cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
+               EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
+       fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+       /*
+        * set input FIFO image size
+        * for now, we support only ITU601 8 bit mode
+        */
+       cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+               EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+       fimc_write(cfg, EXYNOS_CISRCFMT);
+
+       /* offset Y(RGB), Cb, Cr */
+       cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIIYOFF);
+       cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICBOFF);
+       cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIICROFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIICROFF);
+
+       return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+static int fimc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_SRC) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -ENOMEM;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_SRC];
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIIYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIICBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIICRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+       .set_fmt = fimc_src_set_fmt,
+       .set_transf = fimc_src_set_transf,
+       .set_size = fimc_src_set_size,
+       .set_addr = fimc_src_set_addr,
+};
+
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       /* RGB */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_RGB888:
+               cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               return 0;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+                       EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+               fimc_write(cfg, EXYNOS_CISCCTRL);
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       /* YUV */
+       cfg = fimc_read(EXYNOS_CIOCTRL);
+       cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+               EXYNOS_CIOCTRL_ORDER422_MASK |
+               EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_XRGB8888:
+               cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV12MT:
+       case DRM_FORMAT_NV16:
+               cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+               cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CIOCTRL);
+
+       return 0;
+}
+
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = fimc_read(EXYNOS_CIEXTEN);
+
+       if (fmt == DRM_FORMAT_AYUV) {
+               cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+       } else {
+               cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+               fimc_write(cfg, EXYNOS_CIEXTEN);
+
+               cfg = fimc_read(EXYNOS_CITRGFMT);
+               cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+               switch (fmt) {
+               case DRM_FORMAT_RGB565:
+               case DRM_FORMAT_RGB888:
+               case DRM_FORMAT_XRGB8888:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+                       break;
+               case DRM_FORMAT_YUYV:
+               case DRM_FORMAT_YVYU:
+               case DRM_FORMAT_UYVY:
+               case DRM_FORMAT_VYUY:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+                       break;
+               case DRM_FORMAT_NV16:
+               case DRM_FORMAT_NV61:
+               case DRM_FORMAT_YUV422:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+                       break;
+               case DRM_FORMAT_YUV420:
+               case DRM_FORMAT_YVU420:
+               case DRM_FORMAT_NV12:
+               case DRM_FORMAT_NV12MT:
+               case DRM_FORMAT_NV21:
+                       cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+                       break;
+               default:
+                       dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+                               fmt);
+                       return -EINVAL;
+               }
+
+               fimc_write(cfg, EXYNOS_CITRGFMT);
+       }
+
+       cfg = fimc_read(EXYNOS_CIDMAPARAM);
+       cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+       if (fmt == DRM_FORMAT_NV12MT)
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+       else
+               cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+       fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+       return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+static int fimc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = fimc_read(EXYNOS_CITRGFMT);
+       cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+       cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+                       EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+                       EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       fimc_write(cfg, EXYNOS_CITRGFMT);
+       *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+
+       return 0;
+}
+
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+       if (src >= dst * 64) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       } else if (src >= dst * 32) {
+               *ratio = 32;
+               *shift = 5;
+       } else if (src >= dst * 16) {
+               *ratio = 16;
+               *shift = 4;
+       } else if (src >= dst * 8) {
+               *ratio = 8;
+               *shift = 3;
+       } else if (src >= dst * 4) {
+               *ratio = 4;
+               *shift = 2;
+       } else if (src >= dst * 2) {
+               *ratio = 2;
+               *shift = 1;
+       } else {
+               *ratio = 1;
+               *shift = 0;
+       }
+
+       return 0;
+}
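+
+/*
+ * Worked example (illustrative): for src = 1600 and dst = 300,
+ * src >= dst * 4 (1200) but src < dst * 8 (2400), so the caller gets
+ * *ratio = 4 and *shift = 2; ratios of 64 or more are rejected.
+ */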
+
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+               struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg, cfg_ext, shfactor;
+       u32 pre_dst_width, pre_dst_height;
+       u32 pre_hratio, hfactor, pre_vratio, vfactor;
+       int ret = 0;
+       u32 src_w, src_h, dst_w, dst_h;
+
+       cfg_ext = fimc_read(EXYNOS_CITRGFMT);
+       if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
+               src_w = src->h;
+               src_h = src->w;
+       } else {
+               src_w = src->w;
+               src_h = src->h;
+       }
+
+       if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
+               dst_w = dst->h;
+               dst_h = dst->w;
+       } else {
+               dst_w = dst->w;
+               dst_h = dst->h;
+       }
+
+       ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+               return ret;
+       }
+
+       ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+               return ret;
+       }
+
+       pre_dst_width = src_w / pre_hratio;
+       pre_dst_height = src_h / pre_vratio;
+       DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+               pre_dst_width, pre_dst_height);
+       DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+               __func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+       sc->hratio = (src_w << 14) / (dst_w << hfactor);
+       sc->vratio = (src_h << 14) / (dst_h << vfactor);
+       sc->up_h = (dst_w >= src_w);
+       sc->up_v = (dst_h >= src_h);
+       DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+       __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+       shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
+       DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+       cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+               EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+               EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+       fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+       cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+               EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+       fimc_write(cfg, EXYNOS_CISCPREDST);
+
+       return ret;
+}
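+
+/*
+ * Worked example (illustrative): scaling 1280x720 to 640x360 without
+ * rotation gives pre_hratio = pre_vratio = 2 (hfactor = vfactor = 1),
+ * a 640x360 prescaler output, hratio = vratio = (1280 << 14) /
+ * (640 << 1) = 16384 (i.e. the main scaler runs 1:1), and
+ * shfactor = 10 - (1 + 1) = 8.
+ */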
+
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+       u32 cfg, cfg_ext;
+
+       DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+               __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+       DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+               __func__, sc->hratio, sc->vratio);
+
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+               EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+               EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+               EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+               EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+               EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+       if (sc->range)
+               cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+                       EXYNOS_CISCCTRL_CSCY2R_WIDE);
+       if (sc->bypass)
+               cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+       if (sc->up_h)
+               cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+       if (sc->up_v)
+               cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+       cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+               EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+       fimc_write(cfg, EXYNOS_CISCCTRL);
+
+       cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+       cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+       cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+       cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+               EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+       fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+static int fimc_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct drm_exynos_sz img_sz = *sz;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+               __func__, swap, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+               EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+       fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+       DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, pos->x, pos->y, pos->w, pos->h);
+
+       /* CSC ITU */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+       if (sz->hsize >= FIMC_WIDTH_ITU_709)
+               cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+       else
+               cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+               img_sz.hsize = sz->vsize;
+               img_sz.vsize = sz->hsize;
+       }
+
+       /* target image size */
+       cfg = fimc_read(EXYNOS_CITRGFMT);
+       cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+               EXYNOS_CITRGFMT_TARGETV_MASK);
+       cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+               EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+       fimc_write(cfg, EXYNOS_CITRGFMT);
+
+       /* target area */
+       cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+       fimc_write(cfg, EXYNOS_CITAREA);
+
+       /* offset Y(RGB), Cb, Cr */
+       cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOYOFF);
+       cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOCBOFF);
+       cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+               EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+       fimc_write(cfg, EXYNOS_CIOCROFF);
+
+       return 0;
+}
+
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+       u32 cfg, i, buf_num = 0;
+       u32 mask = 0x00000001;
+
+       cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+       for (i = 0; i < FIMC_REG_SZ; i++)
+               if (cfg & (mask << i))
+                       buf_num++;
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
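+
+/*
+ * Note: this is a population count of the 32-bit buffer sequence mask;
+ * hweight32(cfg) would compute the same value.
+ */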
+
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool enable;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       mutex_lock(&ctx->lock);
+
+       /* mask register set */
+       cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               enable = true;
+               break;
+       case IPP_BUF_DEQUEUE:
+               enable = false;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       /* sequence id */
+       cfg &= (~mask);
+       cfg |= (enable << buf_id);
+       fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+       /* interrupt enable */
+       if (buf_type == IPP_BUF_ENQUEUE &&
+           fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+               fimc_handle_irq(ctx, true, false, true);
+
+       /* interrupt disable */
+       if (buf_type == IPP_BUF_DEQUEUE &&
+           fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+               fimc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+       mutex_unlock(&ctx->lock);
+       return ret;
+}
+
+static int fimc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > FIMC_MAX_DST) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -ENOMEM;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               config = &property->config[EXYNOS_DRM_OPS_DST];
+
+               fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       EXYNOS_CIOYSA(buf_id));
+
+               if (config->fmt == DRM_FORMAT_YVU420) {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCRSA(buf_id));
+               } else {
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                               EXYNOS_CIOCBSA(buf_id));
+                       fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                               EXYNOS_CIOCRSA(buf_id));
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
+               fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+       .set_fmt = fimc_dst_set_fmt,
+       .set_transf = fimc_dst_set_transf,
+       .set_size = fimc_dst_set_size,
+       .set_addr = fimc_dst_set_addr,
+};
+
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable) {
+               clk_enable(ctx->sclk_fimc_clk);
+               clk_enable(ctx->fimc_clk);
+               clk_enable(ctx->wb_clk);
+               ctx->suspended = false;
+       } else {
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_disable(ctx->fimc_clk);
+               clk_disable(ctx->wb_clk);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+       struct fimc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       int buf_id;
+
+       DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+       fimc_clear_irq(ctx);
+       if (fimc_check_ovf(ctx))
+               return IRQ_NONE;
+
+       if (!fimc_check_frame_end(ctx))
+               return IRQ_NONE;
+
+       buf_id = fimc_get_buf_id(ctx);
+       if (buf_id < 0)
+               return IRQ_HANDLED;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+       if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return IRQ_HANDLED;
+       }
+
+       event_work->ippdrv = ippdrv;
+       event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+       queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+
+       return IRQ_HANDLED;
+}
+
+static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = FIMC_REFRESH_MIN;
+       prop_list->refresh_max = FIMC_REFRESH_MAX;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
+                               (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = FIMC_CROP_MAX;
+       prop_list->crop_max.vsize = FIMC_CROP_MAX;
+       prop_list->crop_min.hsize = FIMC_CROP_MIN;
+       prop_list->crop_min.vsize = FIMC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = FIMC_SCALE_MAX;
+       prop_list->scale_max.vsize = FIMC_SCALE_MAX;
+       prop_list->scale_min.hsize = FIMC_SCALE_MIN;
+       prop_list->scale_min.vsize = FIMC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
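+/*
+ * Validate a property request per operation. A 90 or 270 degree rotation
+ * swaps width and height, so in that case the crop and scale limits are
+ * checked against the swapped axes.
+ */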
+static int fimc_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos *pos;
+       struct drm_exynos_sz *sz;
+       bool swap;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               /* check for flip */
+               if (!fimc_check_drm_flip(config->flip)) {
+                       DRM_ERROR("invalid flip.\n");
+                       goto err_property;
+               }
+
+               /* check for degree */
+               switch (config->degree) {
+               case EXYNOS_DRM_DEGREE_90:
+               case EXYNOS_DRM_DEGREE_270:
+                       swap = true;
+                       break;
+               case EXYNOS_DRM_DEGREE_0:
+               case EXYNOS_DRM_DEGREE_180:
+                       swap = false;
+                       break;
+               default:
+                       DRM_ERROR("invalid degree.\n");
+                       goto err_property;
+               }
+
+               /* check for buffer bound */
+               if ((pos->x + pos->w > sz->hsize) ||
+                       (pos->y + pos->h > sz->vsize)) {
+                       DRM_ERROR("out of buf bound.\n");
+                       goto err_property;
+               }
+
+               /* check for crop */
+               if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+                       if (swap) {
+                               if ((pos->h < pp->crop_min.hsize) ||
+                                       (sz->vsize > pp->crop_max.hsize) ||
+                                       (pos->w < pp->crop_min.vsize) ||
+                                       (sz->hsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->crop_min.hsize) ||
+                                       (sz->hsize > pp->crop_max.hsize) ||
+                                       (pos->h < pp->crop_min.vsize) ||
+                                       (sz->vsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+
+               /* check for scale */
+               if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+                       if (swap) {
+                               if ((pos->h < pp->scale_min.hsize) ||
+                                       (sz->vsize > pp->scale_max.hsize) ||
+                                       (pos->w < pp->scale_min.vsize) ||
+                                       (sz->hsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->scale_min.hsize) ||
+                                       (sz->hsize > pp->scale_max.hsize) ||
+                                       (pos->h < pp->scale_min.vsize) ||
+                                       (sz->vsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+
+err_property:
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+                       i ? "dst" : "src", config->flip, config->degree,
+                       pos->x, pos->y, pos->w, pos->h,
+                       sz->hsize, sz->vsize);
+       }
+
+       return -EINVAL;
+}
+
+static void fimc_clear_addr(struct fimc_context *ctx)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:\n", __func__);
+
+       for (i = 0; i < FIMC_MAX_SRC; i++) {
+               fimc_write(0, EXYNOS_CIIYSA(i));
+               fimc_write(0, EXYNOS_CIICBSA(i));
+               fimc_write(0, EXYNOS_CIICRSA(i));
+       }
+
+       for (i = 0; i < FIMC_MAX_DST; i++) {
+               fimc_write(0, EXYNOS_CIOYSA(i));
+               fimc_write(0, EXYNOS_CIOCBSA(i));
+               fimc_write(0, EXYNOS_CIOCRSA(i));
+       }
+}
+
+static int fimc_ippdrv_reset(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       fimc_sw_reset(ctx, false);
+
+       /* reset scaler capability */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+       fimc_clear_addr(ctx);
+
+       return 0;
+}
+
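+/*
+ * Program and kick one operation: set up the prescaler and scaler from
+ * the validated property, route the input (memory for M2M, FIMD
+ * writeback for WB), then start the scaler and image capture.
+ */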
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       int ret, i;
+       u32 cfg0, cfg1;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       fimc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
+       ret = fimc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set prescaler.\n");
+               return ret;
+       }
+
+       /* If set to true, the screen contents can be captured as JPEG. */
+       fimc_handle_jpeg(ctx, false);
+       fimc_set_scaler(ctx, &ctx->sc);
+       fimc_set_polarity(ctx, &ctx->pol);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+               fimc_handle_lastend(ctx, false);
+
+               /* setup dma */
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+               cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+               break;
+       case IPP_CMD_WB:
+               fimc_set_type_ctrl(ctx, FIMC_WB_A);
+               fimc_handle_lastend(ctx, true);
+
+               /* setup FIMD */
+               fimc_set_camblk_fimd0_wb(ctx);
+
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operation.\n");
+               return ret;
+       }
+
+       /* Reset status */
+       fimc_write(0x0, EXYNOS_CISTATUS);
+
+       cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+       /* Scaler */
+       cfg1 = fimc_read(EXYNOS_CISCCTRL);
+       cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+       cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+               EXYNOS_CISCCTRL_SCALERSTART);
+
+       fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+       /* Enable image capture */
+       cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+       fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+       /* Enable frame end irq by clearing its disable bit */
+       cfg0 = fimc_read(EXYNOS_CIGCTRL);
+       cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+       fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+       cfg0 = fimc_read(EXYNOS_CIOCTRL);
+       cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+       fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+       if (cmd == IPP_CMD_M2M) {
+               cfg0 = fimc_read(EXYNOS_MSCTRL);
+               cfg0 |= EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg0, EXYNOS_MSCTRL);
+       }
+
+       return 0;
+}
+
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* Source clear */
+               cfg = fimc_read(EXYNOS_MSCTRL);
+               cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+               cfg &= ~EXYNOS_MSCTRL_ENVID;
+               fimc_write(cfg, EXYNOS_MSCTRL);
+               break;
+       case IPP_CMD_WB:
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               dev_err(dev, "invalid operation.\n");
+               break;
+       }
+
+       fimc_handle_irq(ctx, false, false, true);
+
+       /* reset sequence */
+       fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+       /* Scaler disable */
+       cfg = fimc_read(EXYNOS_CISCCTRL);
+       cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+       fimc_write(cfg, EXYNOS_CISCCTRL);
+
+       /* Disable image capture */
+       cfg = fimc_read(EXYNOS_CIIMGCPT);
+       cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+       fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+       /* Disable frame end irq by setting its disable bit */
+       cfg = fimc_read(EXYNOS_CIGCTRL);
+       cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+       fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
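+/*
+ * Probe: take the four FIMC clocks, reparent sclk_fimc per the
+ * SoC-specific driver data and set its rate from platform data, map the
+ * registers, hook up the threaded IRQ and register the block as an IPP
+ * driver.
+ */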
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx;
+       struct clk      *parent_clk;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct exynos_drm_fimc_pdata *pdata;
+       struct fimc_driverdata *ddata;
+       int ret;
+
+       pdata = pdev->dev.platform_data;
+       if (!pdata) {
+               dev_err(dev, "no platform data specified.\n");
+               return -EINVAL;
+       }
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ddata = (struct fimc_driverdata *)
+               platform_get_device_id(pdev)->driver_data;
+
+       /* clock control */
+       ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+       if (IS_ERR(ctx->sclk_fimc_clk)) {
+               dev_err(dev, "failed to get src fimc clock.\n");
+               ret = PTR_ERR(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+       clk_enable(ctx->sclk_fimc_clk);
+
+       ctx->fimc_clk = clk_get(dev, "fimc");
+       if (IS_ERR(ctx->fimc_clk)) {
+               dev_err(dev, "failed to get fimc clock.\n");
+               ret = PTR_ERR(ctx->fimc_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_clk = clk_get(dev, "pxl_async0");
+       if (IS_ERR(ctx->wb_clk)) {
+               dev_err(dev, "failed to get writeback a clock.\n");
+               ret = PTR_ERR(ctx->wb_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               goto err_ctx;
+       }
+
+       ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+       if (IS_ERR(ctx->wb_b_clk)) {
+               dev_err(dev, "failed to get writeback b clock.\n");
+               ret = PTR_ERR(ctx->wb_b_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               goto err_ctx;
+       }
+
+       parent_clk = clk_get(dev, ddata->parent_clk);
+
+       if (IS_ERR(parent_clk)) {
+               dev_err(dev, "failed to get parent clock.\n");
+               ret = PTR_ERR(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+               dev_err(dev, "failed to set parent.\n");
+               ret = -EINVAL;
+               clk_put(parent_clk);
+               clk_disable(ctx->sclk_fimc_clk);
+               clk_put(ctx->sclk_fimc_clk);
+               clk_put(ctx->fimc_clk);
+               clk_put(ctx->wb_clk);
+               clk_put(ctx->wb_b_clk);
+               goto err_ctx;
+       }
+
+       clk_put(parent_clk);
+       clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+               IRQF_ONESHOT, "drm_fimc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initialization */
+       ctx->id = pdev->id;
+       ctx->pol = pdata->pol;
+       ctx->ddata = ddata;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+       ippdrv->check_property = fimc_ippdrv_check_property;
+       ippdrv->reset = fimc_ippdrv_reset;
+       ippdrv->start = fimc_ippdrv_start;
+       ippdrv->stop = fimc_ippdrv_stop;
+       ret = fimc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm fimc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fimc_context *ctx = get_fimc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->sclk_fimc_clk);
+       clk_put(ctx->fimc_clk);
+       clk_put(ctx->wb_clk);
+       clk_put(ctx->wb_b_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
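+/*
+ * System sleep only touches the clocks when runtime PM has not already
+ * gated them; the runtime PM callbacks below gate them unconditionally.
+ */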
+#ifdef CONFIG_PM_SLEEP
+static int fimc_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (!pm_runtime_suspended(dev))
+               return fimc_clk_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimc_runtime_suspend(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, false);
+}
+
+static int fimc_runtime_resume(struct device *dev)
+{
+       struct fimc_context *ctx = get_fimc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return  fimc_clk_ctrl(ctx, true);
+}
+#endif
+
+static struct fimc_driverdata exynos4210_fimc_data = {
+       .parent_clk = "mout_mpll",
+};
+
+static struct fimc_driverdata exynos4412_fimc_data = {
+       .parent_clk = "mout_mpll_user",
+};
+
+static struct platform_device_id fimc_driver_ids[] = {
+       {
+               .name           = "exynos4210-fimc",
+               .driver_data    = (unsigned long)&exynos4210_fimc_data,
+       }, {
+               .name           = "exynos4412-fimc",
+               .driver_data    = (unsigned long)&exynos4412_fimc_data,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
+
+static const struct dev_pm_ops fimc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+       SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+struct platform_driver fimc_driver = {
+       .probe          = fimc_probe,
+       .remove         = __devexit_p(fimc_remove),
+       .id_table       = fimc_driver_ids,
+       .driver         = {
+               .name   = "exynos-drm-fimc",
+               .owner  = THIS_MODULE,
+               .pm     = &fimc_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644 (file)
index 0000000..dc970fa
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
index e08478f..bf0d9ba 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
+#include "exynos_drm_iommu.h"
 
 /*
 * FIMD stands for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
        unsigned int            fb_height;
        unsigned int            bpp;
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
        bool                    enabled;
+       bool                    resume;
 };
 
 struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
        u32                             vidcon1;
        bool                            suspended;
        struct mutex                    lock;
+       wait_queue_head_t               wait_vsync_queue;
+       atomic_t                        wait_vsync_event;
 
        struct exynos_drm_panel_info *panel;
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id fimd_driver_dt_match[] = {
+       { .compatible = "samsung,exynos4-fimd",
+         .data = &exynos4_fimd_driver_data },
+       { .compatible = "samsung,exynos5-fimd",
+         .data = &exynos5_fimd_driver_data },
+       {},
+};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
+#endif
+
 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
        struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+       const struct of_device_id *of_id =
+                       of_match_device(fimd_driver_dt_match, &pdev->dev);
+
+       if (of_id)
+               return (struct fimd_driver_data *)of_id->data;
+#endif
+
        return (struct fimd_driver_data *)
                platform_get_device_id(pdev)->driver_data;
 }
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
 
        /* setup horizontal and vertical display size. */
        val = VIDTCON2_LINEVAL(timing->yres - 1) |
-              VIDTCON2_HOZVAL(timing->xres - 1);
+              VIDTCON2_HOZVAL(timing->xres - 1) |
+              VIDTCON2_LINEVAL_E(timing->yres - 1) |
+              VIDTCON2_HOZVAL_E(timing->xres - 1);
        writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
 
        /* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
        }
 }
 
+static void fimd_wait_for_vblank(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+
+       if (ctx->suspended)
+               return;
+
+       atomic_set(&ctx->wait_vsync_event, 1);
+
+       /*
+        * Wait for FIMD to signal the VSYNC interrupt, or time out
+        * after 50ms (one frame period at a 20Hz refresh rate).
+        */
+       if (!wait_event_timeout(ctx->wait_vsync_queue,
+                               !atomic_read(&ctx->wait_vsync_event),
+                               DRM_HZ/20))
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
+
 static struct exynos_drm_manager_ops fimd_manager_ops = {
        .dpms = fimd_dpms,
        .apply = fimd_apply,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
+       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
        win_data->dma_addr = overlay->dma_addr[0] + offset;
-       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
                        win_data->offset_x, win_data->offset_y);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
-       DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->dma_addr,
-                       (unsigned long)win_data->vaddr);
+       DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
 }
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
        struct fimd_win_data *win_data;
        int win = zpos;
        unsigned long val, alpha, size;
+       unsigned int last_x;
+       unsigned int last_y;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
 
        /* buffer size */
        val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
-               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+               VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
+               VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
+               VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
        writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
 
        /* OSD position */
        val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
-               VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+               VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
+               VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
+               VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
        writel(val, ctx->regs + VIDOSD_A(win));
 
-       val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
-                                       win_data->ovl_width - 1) |
-               VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
-                                       win_data->ovl_height - 1);
+       last_x = win_data->offset_x + win_data->ovl_width;
+       if (last_x)
+               last_x--;
+       last_y = win_data->offset_y + win_data->ovl_height;
+       if (last_y)
+               last_y--;
+
+       val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
+               VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
+
        writel(val, ctx->regs + VIDOSD_B(win));
 
        DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
-                       win_data->offset_x, win_data->offset_y,
-                       win_data->offset_x + win_data->ovl_width - 1,
-                       win_data->offset_y + win_data->ovl_height - 1);
+                       win_data->offset_x, win_data->offset_y, last_x, last_y);
 
        /* hardware window 0 doesn't support alpha channel. */
        if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
 
        win_data = &ctx->win_data[win];
 
+       if (ctx->suspended) {
+               /* do not resume this window */
+               win_data->resume = false;
+               return;
+       }
+
        /* protect windows */
        val = readl(ctx->regs + SHADOWCON);
        val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
        win_data->enabled = false;
 }
 
-static void fimd_wait_for_vblank(struct device *dev)
-{
-       struct fimd_context *ctx = get_fimd_context(dev);
-       int ret;
-
-       ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
-                                       VIDCON1_VSTATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static struct exynos_drm_overlay_ops fimd_overlay_ops = {
        .mode_set = fimd_win_mode_set,
        .commit = fimd_win_commit,
        .disable = fimd_win_disable,
-       .wait_for_vblank = fimd_wait_for_vblank,
 };
 
 static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
-
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
-
-       if (is_checked) {
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
-               /*
-                * don't off vblank if vblank_disable_allowed is 1,
-                * because vblank would be off by timer handler.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, crtc);
+               drm_vblank_put(drm_dev, crtc);
        }
 
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
        drm_handle_vblank(drm_dev, manager->pipe);
        fimd_finish_pageflip(drm_dev, manager->pipe);
 
+       /* set wait vsync event to zero and wake up queue. */
+       if (atomic_read(&ctx->wait_vsync_event)) {
+               atomic_set(&ctx->wait_vsync_event, 0);
+               DRM_WAKEUP(&ctx->wait_vsync_queue);
+       }
 out:
        return IRQ_HANDLED;
 }
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
         */
        drm_dev->vblank_disable_allowed = 1;
 
+       /* attach this sub driver to iommu mapping if supported. */
+       if (is_drm_iommu_supported(drm_dev))
+               drm_iommu_attach_device(drm_dev, dev);
+
        return 0;
 }
 
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       /* TODO. */
+       /* detach this sub driver from iommu mapping if supported. */
+       if (is_drm_iommu_supported(drm_dev))
+               drm_iommu_detach_device(drm_dev, dev);
 }
 
 static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
        return 0;
 }
 
+static void fimd_window_suspend(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->resume = win_data->enabled;
+               fimd_win_disable(dev, i);
+       }
+       fimd_wait_for_vblank(dev);
+}
+
+static void fimd_window_resume(struct device *dev)
+{
+       struct fimd_context *ctx = get_fimd_context(dev);
+       struct fimd_win_data *win_data;
+       int i;
+
+       for (i = 0; i < WINDOWS_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->enabled = win_data->resume;
+               win_data->resume = false;
+       }
+}
+
 static int fimd_activate(struct fimd_context *ctx, bool enable)
 {
+       struct device *dev = ctx->subdrv.dev;
        if (enable) {
                int ret;
-               struct device *dev = ctx->subdrv.dev;
 
                ret = fimd_clock(ctx, true);
                if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
                /* if vblank was enabled status, enable it again. */
                if (test_and_clear_bit(0, &ctx->irq_flags))
                        fimd_enable_vblank(dev);
+
+               fimd_window_resume(dev);
        } else {
+               fimd_window_suspend(dev);
+
                fimd_clock(ctx, false);
                ctx->suspended = true;
        }
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        if (!ctx)
                return -ENOMEM;
 
-       ctx->bus_clk = clk_get(dev, "fimd");
+       ctx->bus_clk = devm_clk_get(dev, "fimd");
        if (IS_ERR(ctx->bus_clk)) {
                dev_err(dev, "failed to get bus clock\n");
-               ret = PTR_ERR(ctx->bus_clk);
-               goto err_clk_get;
+               return PTR_ERR(ctx->bus_clk);
        }
 
-       ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+       ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
        if (IS_ERR(ctx->lcd_clk)) {
                dev_err(dev, "failed to get lcd clock\n");
-               ret = PTR_ERR(ctx->lcd_clk);
-               goto err_bus_clk;
+               return PTR_ERR(ctx->lcd_clk);
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!ctx->regs) {
                dev_err(dev, "failed to map registers\n");
-               ret = -ENXIO;
-               goto err_clk;
+               return -ENXIO;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "irq request failed.\n");
-               goto err_clk;
+               return -ENXIO;
        }
 
        ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
                                                        0, "drm_fimd", ctx);
        if (ret) {
                dev_err(dev, "irq request failed.\n");
-               goto err_clk;
+               return ret;
        }
 
        ctx->vidcon0 = pdata->vidcon0;
        ctx->vidcon1 = pdata->vidcon1;
        ctx->default_win = pdata->default_win;
        ctx->panel = panel;
+       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
 
        subdrv = &ctx->subdrv;
 
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
        exynos_drm_subdrv_register(subdrv);
 
        return 0;
-
-err_clk:
-       clk_disable(ctx->lcd_clk);
-       clk_put(ctx->lcd_clk);
-
-err_bus_clk:
-       clk_disable(ctx->bus_clk);
-       clk_put(ctx->bus_clk);
-
-err_clk_get:
-       return ret;
 }
 
 static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
 out:
        pm_runtime_disable(dev);
 
-       clk_put(ctx->lcd_clk);
-       clk_put(ctx->bus_clk);
-
        return 0;
 }
 
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
                .name   = "exynos4-fb",
                .owner  = THIS_MODULE,
                .pm     = &fimd_pm_ops,
+               .of_match_table = of_match_ptr(fimd_driver_dt_match),
        },
 };
index f7aab24..6ffa076 100644 (file)
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER               4
 #define G2D_HW_MINOR_VER               1
 #define G2D_CMDLIST_POOL_SIZE          (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM           (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
+#define MAX_BUF_ADDR_NR                        6
+
+/* default maximum size of the userptr buffer pool: 64MB */
+#define MAX_POOL               (64 * 1024 * 1024)
+
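+/* a mapped buffer is referenced either by GEM handle or by raw userptr */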
+enum {
+       BUF_TYPE_GEM = 1,
+       BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
-       u32     head;
-       u32     data[G2D_CMDLIST_DATA_NUM];
-       u32     last;   /* last data offset */
+       u32             head;
+       unsigned long   data[G2D_CMDLIST_DATA_NUM];
+       u32             last;   /* last data offset */
 };
 
 struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
        struct drm_exynos_g2d_event     event;
 };
 
-struct g2d_gem_node {
+struct g2d_cmdlist_userptr {
        struct list_head        list;
-       unsigned int            handle;
+       dma_addr_t              dma_addr;
+       unsigned long           userptr;
+       unsigned long           size;
+       struct page             **pages;
+       unsigned int            npages;
+       struct sg_table         *sgt;
+       struct vm_area_struct   *vma;
+       atomic_t                refcount;
+       bool                    in_pool;
+       bool                    out_of_list;
 };
 
 struct g2d_cmdlist_node {
        struct list_head        list;
        struct g2d_cmdlist      *cmdlist;
-       unsigned int            gem_nr;
+       unsigned int            map_nr;
+       unsigned long           handles[MAX_BUF_ADDR_NR];
+       unsigned int            obj_type[MAX_BUF_ADDR_NR];
        dma_addr_t              dma_addr;
 
        struct drm_exynos_pending_g2d_event     *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
        struct list_head        list;
        struct list_head        run_cmdlist;
        struct list_head        event_list;
+       struct drm_file         *filp;
        pid_t                   pid;
        struct completion       complete;
        int                     async;
@@ -143,23 +168,33 @@ struct g2d_data {
        struct mutex                    cmdlist_mutex;
        dma_addr_t                      cmdlist_pool;
        void                            *cmdlist_pool_virt;
+       struct dma_attrs                cmdlist_dma_attrs;
 
        /* runqueue */
        struct g2d_runqueue_node        *runqueue_node;
        struct list_head                runqueue;
        struct mutex                    runqueue_mutex;
        struct kmem_cache               *runqueue_slab;
+
+       unsigned long                   current_pool;
+       unsigned long                   max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
        struct device *dev = g2d->dev;
        struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
        int nr;
        int ret;
 
-       g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
-                                               &g2d->cmdlist_pool, GFP_KERNEL);
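+       /*
+        * The command list pool is filled by the CPU and fetched by the
+        * G2D engine, so switch it from coherent to write-combined
+        * mappings.
+        */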
+       init_dma_attrs(&g2d->cmdlist_dma_attrs);
+       dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+       g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+                                               G2D_CMDLIST_POOL_SIZE,
+                                               &g2d->cmdlist_pool, GFP_KERNEL,
+                                               &g2d->cmdlist_dma_attrs);
        if (!g2d->cmdlist_pool_virt) {
                dev_err(dev, "failed to allocate dma memory\n");
                return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
        return 0;
 
 err:
-       dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool);
+       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+                       g2d->cmdlist_pool_virt,
+                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
        return ret;
 }
 
 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-       struct device *dev = g2d->dev;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 
        kfree(g2d->cmdlist_node);
-       dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-                       g2d->cmdlist_pool);
+       dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+                       g2d->cmdlist_pool_virt,
+                       g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 }
 
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
                list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-                              struct drm_file *file,
-                              struct g2d_cmdlist_node *node)
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+                                       unsigned long obj,
+                                       bool force)
 {
-       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr =
+                                       (struct g2d_cmdlist_userptr *)obj;
+
+       if (!obj)
+               return;
+
+       if (force)
+               goto out;
+
+       atomic_dec(&g2d_userptr->refcount);
+
+       if (atomic_read(&g2d_userptr->refcount) > 0)
+               return;
+
+       if (g2d_userptr->in_pool)
+               return;
+
+out:
+       exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+                                       DMA_BIDIRECTIONAL);
+
+       exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+                                       g2d_userptr->npages,
+                                       g2d_userptr->vma);
+
+       if (!g2d_userptr->out_of_list)
+               list_del_init(&g2d_userptr->list);
+
+       sg_free_table(g2d_userptr->sgt);
+       kfree(g2d_userptr->sgt);
+       g2d_userptr->sgt = NULL;
+
+       kfree(g2d_userptr->pages);
+       g2d_userptr->pages = NULL;
+       kfree(g2d_userptr);
+       g2d_userptr = NULL;
+}
+
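+/*
+ * Pin a userspace buffer for G2D DMA. An existing mapping with the same
+ * userptr and size is reused with its refcount bumped; otherwise the
+ * pages are pinned, wrapped in an sg table and mapped for DMA. Mappings
+ * that fit under max_pool stay cached in the per-file userptr list.
+ */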
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+                                       unsigned long userptr,
+                                       unsigned long size,
+                                       struct drm_file *filp,
+                                       unsigned long *obj)
+{
+       struct drm_exynos_file_private *file_priv = filp->driver_priv;
+       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr;
+       struct g2d_data *g2d;
+       struct page **pages;
+       struct sg_table *sgt;
+       struct vm_area_struct *vma;
+       unsigned long start, end;
+       unsigned int npages, offset;
+       int ret;
+
+       if (!size) {
+               DRM_ERROR("invalid userptr size.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       g2d = dev_get_drvdata(g2d_priv->dev);
+
+       /* check if userptr already exists in userptr_list. */
+       list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+               if (g2d_userptr->userptr == userptr) {
+                       /*
+                        * also check the size: the same address may have
+                        * been registered with a different size.
+                        */
+                       if (g2d_userptr->size == size) {
+                               atomic_inc(&g2d_userptr->refcount);
+                               *obj = (unsigned long)g2d_userptr;
+
+                               return &g2d_userptr->dma_addr;
+                       }
+
+                       /*
+                        * The g2d DMA engine may still be accessing this
+                        * g2d_userptr memory region, so remove the object
+                        * from userptr_list so it is not looked up again,
+                        * and take it out of the userptr pool so it gets
+                        * released once the DMA access completes.
+                        */
+                       g2d_userptr->out_of_list = true;
+                       g2d_userptr->in_pool = false;
+                       list_del_init(&g2d_userptr->list);
+
+                       break;
+               }
+       }
+
+       g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+       if (!g2d_userptr) {
+               DRM_ERROR("failed to allocate g2d_userptr.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       atomic_set(&g2d_userptr->refcount, 1);
+
+       start = userptr & PAGE_MASK;
+       offset = userptr & ~PAGE_MASK;
+       end = PAGE_ALIGN(userptr + size);
+       npages = (end - start) >> PAGE_SHIFT;
+       g2d_userptr->npages = npages;
+
+       pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+       if (!pages) {
+               DRM_ERROR("failed to allocate pages.\n");
+               kfree(g2d_userptr);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       vma = find_vma(current->mm, userptr);
+       if (!vma) {
+               DRM_ERROR("failed to get vm region.\n");
+               ret = -EFAULT;
+               goto err_free_pages;
+       }
+
+       if (vma->vm_end < userptr + size) {
+               DRM_ERROR("vma is too small.\n");
+               ret = -EFAULT;
+               goto err_free_pages;
+       }
+
+       g2d_userptr->vma = exynos_gem_get_vma(vma);
+       if (!g2d_userptr->vma) {
+               DRM_ERROR("failed to copy vma.\n");
+               ret = -ENOMEM;
+               goto err_free_pages;
+       }
+
+       g2d_userptr->size = size;
+
+       ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+                                               npages, pages, vma);
+       if (ret < 0) {
+               DRM_ERROR("failed to get user pages from userptr.\n");
+               goto err_put_vma;
+       }
+
+       g2d_userptr->pages = pages;
+
+       sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
+       if (!sgt) {
+               DRM_ERROR("failed to allocate sg table.\n");
+               ret = -ENOMEM;
+               goto err_free_userptr;
+       }
+
+       ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+                                       size, GFP_KERNEL);
+       if (ret < 0) {
+               DRM_ERROR("failed to get sgt from pages.\n");
+               goto err_free_sgt;
+       }
+
+       g2d_userptr->sgt = sgt;
+
+       ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+                                               DMA_BIDIRECTIONAL);
+       if (ret < 0) {
+               DRM_ERROR("failed to map sgt with dma region.\n");
+               goto err_free_sgt;
+       }
+
+       g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+       g2d_userptr->userptr = userptr;
+
+       list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+       if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+               g2d->current_pool += npages << PAGE_SHIFT;
+               g2d_userptr->in_pool = true;
+       }
+
+       *obj = (unsigned long)g2d_userptr;
+
+       return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+       sg_free_table(sgt);
+       kfree(sgt);
+       sgt = NULL;
+
+err_free_userptr:
+       exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+                                       g2d_userptr->npages,
+                                       g2d_userptr->vma);
+
+err_put_vma:
+       exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+       kfree(pages);
+       kfree(g2d_userptr);
+       pages = NULL;
+       g2d_userptr = NULL;
+
+       return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+                                       struct g2d_data *g2d,
+                                       struct drm_file *filp)
+{
+       struct drm_exynos_file_private *file_priv = filp->driver_priv;
        struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+       struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+       list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+               if (g2d_userptr->in_pool)
+                       g2d_userptr_put_dma_addr(drm_dev,
+                                               (unsigned long)g2d_userptr,
+                                               true);
+
+       g2d->current_pool = 0;
+}
+
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+                               struct g2d_cmdlist_node *node,
+                               struct drm_device *drm_dev,
+                               struct drm_file *file)
+{
        struct g2d_cmdlist *cmdlist = node->cmdlist;
-       dma_addr_t *addr;
        int offset;
        int i;
 
-       for (i = 0; i < node->gem_nr; i++) {
-               struct g2d_gem_node *gem_node;
-
-               gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-               if (!gem_node) {
-                       dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-                       return -ENOMEM;
-               }
+       for (i = 0; i < node->map_nr; i++) {
+               unsigned long handle;
+               dma_addr_t *addr;
 
                offset = cmdlist->last - (i * 2 + 1);
-               gem_node->handle = cmdlist->data[offset];
-
-               addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-                                                  file);
-               if (IS_ERR(addr)) {
-                       node->gem_nr = i;
-                       kfree(gem_node);
-                       return PTR_ERR(addr);
+               handle = cmdlist->data[offset];
+
+               if (node->obj_type[i] == BUF_TYPE_GEM) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+                                                               file);
+                       if (IS_ERR(addr)) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
+               } else {
+                       struct drm_exynos_g2d_userptr g2d_userptr;
+
+                       if (copy_from_user(&g2d_userptr, (void __user *)handle,
+                               sizeof(struct drm_exynos_g2d_userptr))) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
+
+                       addr = g2d_userptr_get_dma_addr(drm_dev,
+                                                       g2d_userptr.userptr,
+                                                       g2d_userptr.size,
+                                                       file,
+                                                       &handle);
+                       if (IS_ERR(addr)) {
+                               node->map_nr = i;
+                               return -EFAULT;
+                       }
                }
 
                cmdlist->data[offset] = *addr;
-               list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-               g2d_priv->gem_nr++;
+               node->handles[i] = handle;
        }
 
        return 0;
 }
 
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-                               struct drm_file *file,
-                               unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+                                 struct g2d_cmdlist_node *node,
+                                 struct drm_file *filp)
 {
-       struct drm_exynos_file_private *file_priv = file->driver_priv;
-       struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-       struct g2d_gem_node *node, *n;
+       struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+       int i;
 
-       list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-               if (!nr)
-                       break;
+       for (i = 0; i < node->map_nr; i++) {
+               unsigned long handle = node->handles[i];
 
-               exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-               list_del_init(&node->list);
-               kfree(node);
-               nr--;
+               if (node->obj_type[i] == BUF_TYPE_GEM)
+                       exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+                                                       filp);
+               else
+                       g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+                                                       false);
+
+               node->handles[i] = 0;
        }
+
+       node->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
 static void g2d_free_runqueue_node(struct g2d_data *g2d,
                                   struct g2d_runqueue_node *runqueue_node)
 {
+       struct g2d_cmdlist_node *node;
+
        if (!runqueue_node)
                return;
 
        mutex_lock(&g2d->cmdlist_mutex);
+       /*
+        * The commands in run_cmdlist have completed, so unmap the gem
+        * objects in each command node to drop their references.
+        */
+       list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+               g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
        list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
        mutex_unlock(&g2d->cmdlist_mutex);
 
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+                               struct g2d_cmdlist_node *node,
                                int nr, bool for_addr)
 {
+       struct g2d_cmdlist *cmdlist = node->cmdlist;
        int reg_offset;
        int index;
        int i;
 
        for (i = 0; i < nr; i++) {
                index = cmdlist->last - 2 * (i + 1);
+
+               if (for_addr) {
+                       /* check userptr buffer type. */
+                       reg_offset = (cmdlist->data[index] &
+                                       ~0x7fffffff) >> 31;
+                       if (reg_offset) {
+                               node->obj_type[i] = BUF_TYPE_USERPTR;
+                               cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+                       }
+               }
+
                reg_offset = cmdlist->data[index] & ~0xfffff000;
 
                if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
                case G2D_MSK_BASE_ADDR:
                        if (!for_addr)
                                goto err;
+
+                       if (node->obj_type[i] != BUF_TYPE_USERPTR)
+                               node->obj_type[i] = BUF_TYPE_GEM;
                        break;
                default:
                        if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
        return 0;
 
 err:
-       dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+       dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
        return -EINVAL;
 }
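The top bit of a buffer value is how userspace marks a userptr buffer rather than a GEM handle; g2d_check_reg_offset() above extracts it, records BUF_TYPE_USERPTR and clears the flag. A minimal submitting-side sketch, assuming the two-word struct drm_exynos_g2d_cmd from the uapi header; "userptr_val" is a hypothetical, already-prepared userptr descriptor value:

	struct drm_exynos_g2d_cmd cmd;

	/* a base-address register offset from the set handled above */
	cmd.offset = G2D_MSK_BASE_ADDR;
	/* G2D_BUF_USERPTR (bit 31) makes the kernel treat the value as
	 * a userptr buffer instead of a GEM handle (assumption). */
	cmd.data = userptr_val | G2D_BUF_USERPTR;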
 
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        }
 
        /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-       size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+       size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
        if (size > G2D_CMDLIST_DATA_NUM) {
                dev_err(dev, "cmdlist size is too big\n");
                ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        }
        cmdlist->last += req->cmd_nr * 2;
 
-       ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+       ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
        if (ret < 0)
                goto err_free_event;
 
-       node->gem_nr = req->cmd_gem_nr;
-       if (req->cmd_gem_nr) {
-               struct drm_exynos_g2d_cmd *cmd_gem;
+       node->map_nr = req->cmd_buf_nr;
+       if (req->cmd_buf_nr) {
+               struct drm_exynos_g2d_cmd *cmd_buf;
 
-               cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+               cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
 
                if (copy_from_user(cmdlist->data + cmdlist->last,
-                                       (void __user *)cmd_gem,
-                                       sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+                                       (void __user *)cmd_buf,
+                                       sizeof(*cmd_buf) * req->cmd_buf_nr)) {
                        ret = -EFAULT;
                        goto err_free_event;
                }
-               cmdlist->last += req->cmd_gem_nr * 2;
+               cmdlist->last += req->cmd_buf_nr * 2;
 
-               ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+               ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
                if (ret < 0)
                        goto err_free_event;
 
-               ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+               ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
                if (ret < 0)
                        goto err_unmap;
        }
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        return 0;
 
 err_unmap:
-       g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+       g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
        if (node->event) {
                spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 
        mutex_lock(&g2d->runqueue_mutex);
        runqueue_node->pid = current->pid;
+       runqueue_node->filp = file;
        list_add_tail(&runqueue_node->list, &g2d->runqueue);
        if (!g2d->runqueue_node)
                g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
 }
 EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+       struct g2d_data *g2d;
+       int ret;
+
+       g2d = dev_get_drvdata(dev);
+       if (!g2d)
+               return -EFAULT;
+
+       /* allocate dma-aware cmdlist buffer. */
+       ret = g2d_init_cmdlist(g2d);
+       if (ret < 0) {
+               dev_err(dev, "cmdlist init failed\n");
+               return ret;
+       }
+
+       if (!is_drm_iommu_supported(drm_dev))
+               return 0;
+
+       ret = drm_iommu_attach_device(drm_dev, dev);
+       if (ret < 0) {
+               dev_err(dev, "failed to enable iommu.\n");
+               g2d_fini_cmdlist(g2d);
+       }
+
+       return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       if (!is_drm_iommu_supported(drm_dev))
+               return;
+
+       drm_iommu_detach_device(drm_dev, dev);
+}
+
 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
                        struct drm_file *file)
 {
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
        INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
        INIT_LIST_HEAD(&g2d_priv->event_list);
-       INIT_LIST_HEAD(&g2d_priv->gem_list);
+       INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
        return 0;
 }
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
                return;
 
        mutex_lock(&g2d->cmdlist_mutex);
-       list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+       list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+               /*
+                * unmap all gem objects not yet completed.
+                *
+                * P.S. if the current process was terminated forcibly,
+                * there may still be some commands in inuse_cmdlist,
+                * so unmap them.
+                */
+               g2d_unmap_cmdlist_gem(g2d, node, file);
                list_move_tail(&node->list, &g2d->free_cmdlist);
+       }
        mutex_unlock(&g2d->cmdlist_mutex);
 
-       g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
+       /* release all g2d_userptr in pool. */
+       g2d_userptr_free_all(drm_dev, g2d, file);
 
        kfree(file_priv->g2d_priv);
 }
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
        mutex_init(&g2d->cmdlist_mutex);
        mutex_init(&g2d->runqueue_mutex);
 
-       ret = g2d_init_cmdlist(g2d);
-       if (ret < 0)
-               goto err_destroy_workqueue;
-
-       g2d->gate_clk = clk_get(dev, "fimg2d");
+       g2d->gate_clk = devm_clk_get(dev, "fimg2d");
        if (IS_ERR(g2d->gate_clk)) {
                dev_err(dev, "failed to get gate clock\n");
                ret = PTR_ERR(g2d->gate_clk);
-               goto err_fini_cmdlist;
+               goto err_destroy_workqueue;
        }
 
        pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
                goto err_put_clk;
        }
 
+       g2d->max_pool = MAX_POOL;
+
        platform_set_drvdata(pdev, g2d);
 
        subdrv = &g2d->subdrv;
        subdrv->dev = dev;
+       subdrv->probe = g2d_subdrv_probe;
+       subdrv->remove = g2d_subdrv_remove;
        subdrv->open = g2d_open;
        subdrv->close = g2d_close;
 
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 err_put_clk:
        pm_runtime_disable(dev);
-       clk_put(g2d->gate_clk);
-err_fini_cmdlist:
-       g2d_fini_cmdlist(g2d);
 err_destroy_workqueue:
        destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
        }
 
        pm_runtime_disable(&pdev->dev);
-       clk_put(g2d->gate_clk);
 
        g2d_fini_cmdlist(g2d);
        destroy_workqueue(g2d->g2d_workq);
index d254556..d48183e 100644 (file)
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
 
 static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
 {
-       if (!IS_NONCONTIG_BUFFER(flags)) {
-               if (size >= SZ_1M)
-                       return roundup(size, SECTION_SIZE);
-               else if (size >= SZ_64K)
-                       return roundup(size, SZ_64K);
-               else
-                       goto out;
-       }
-out:
-       return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
-                                               gfp_t gfpmask)
-{
-       struct page *p, **pages;
-       int i, npages;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       pages = drm_malloc_ab(npages, sizeof(struct page *));
-       if (pages == NULL)
-               return ERR_PTR(-ENOMEM);
-
-       for (i = 0; i < npages; i++) {
-               p = alloc_page(gfpmask);
-               if (IS_ERR(p))
-                       goto fail;
-               pages[i] = p;
-       }
-
-       return pages;
-
-fail:
-       while (--i)
-               __free_page(pages[i]);
-
-       drm_free_large(pages);
-       return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
-                                       struct page **pages)
-{
-       int npages;
-
-       npages = obj->size >> PAGE_SHIFT;
-
-       while (--npages >= 0)
-               __free_page(pages[npages]);
+       /* TODO */
 
-       drm_free_large(pages);
+       return roundup(size, PAGE_SIZE);
 }
 
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+       struct scatterlist *sgl;
        unsigned long pfn;
+       int i;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               if (!buf->pages)
-                       return -EINTR;
-
-               pfn = page_to_pfn(buf->pages[page_offset++]);
-       } else
-               pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
-
-       return vm_insert_mixed(vma, f_vaddr, pfn);
-}
+       if (!buf->sgt)
+               return -EINTR;
 
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-       struct scatterlist *sgl;
-       struct page **pages;
-       unsigned int npages, i = 0;
-       int ret;
-
-       if (buf->pages) {
-               DRM_DEBUG_KMS("already allocated.\n");
+       if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+               DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }
 
-       pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
-       if (IS_ERR(pages)) {
-               DRM_ERROR("failed to get pages.\n");
-               return PTR_ERR(pages);
-       }
-
-       npages = obj->size >> PAGE_SHIFT;
-       buf->page_size = PAGE_SIZE;
-
-       buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!buf->sgt) {
-               DRM_ERROR("failed to allocate sg table.\n");
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
-       if (ret < 0) {
-               DRM_ERROR("failed to initialize sg table.\n");
-               ret = -EFAULT;
-               goto err1;
-       }
-
        sgl = buf->sgt->sgl;
-
-       /* set all pages to sg list. */
-       while (i < npages) {
-               sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-               sg_dma_address(sgl) = page_to_phys(pages[i]);
-               i++;
-               sgl = sg_next(sgl);
+       for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+               if (page_offset < (sgl->length >> PAGE_SHIFT))
+                       break;
+               page_offset -=  (sgl->length >> PAGE_SHIFT);
        }
 
-       /* add some codes for UNCACHED type here. TODO */
-
-       buf->pages = pages;
-       return ret;
-err1:
-       kfree(buf->sgt);
-       buf->sgt = NULL;
-err:
-       exynos_gem_put_pages(obj, pages);
-       return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
-       struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-       struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
-       /*
-        * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
-        * allocated at gem fault handler.
-        */
-       sg_free_table(buf->sgt);
-       kfree(buf->sgt);
-       buf->sgt = NULL;
-
-       exynos_gem_put_pages(obj, buf->pages);
-       buf->pages = NULL;
+       pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
 
-       /* add some codes for UNCACHED type here. TODO */
+       return vm_insert_mixed(vma, f_vaddr, pfn);
 }
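For example, with an sg table of two entries covering 16 pages each, a fault at page_offset 20 skips the first entry (leaving page_offset = 4) and resolves to the second entry's starting pfn plus 4.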
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
 
        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
 
-       if (!buf->pages)
-               return;
-
        /*
         * do not release memory region from exporter.
         *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
        if (obj->import_attach)
                goto out;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
-               exynos_drm_gem_put_pages(obj);
-       else
-               exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+       exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
 
 out:
        exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;
 
-       /*
-        * allocate all pages as desired size if user wants to allocate
-        * physically non-continuous memory.
-        */
-       if (flags & EXYNOS_BO_NONCONTIG) {
-               ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
-               if (ret < 0) {
-                       drm_gem_object_release(&exynos_gem_obj->base);
-                       goto err_fini_buf;
-               }
-       } else {
-               ret = exynos_drm_alloc_buf(dev, buf, flags);
-               if (ret < 0) {
-                       drm_gem_object_release(&exynos_gem_obj->base);
-                       goto err_fini_buf;
-               }
+       ret = exynos_drm_alloc_buf(dev, buf, flags);
+       if (ret < 0) {
+               drm_gem_object_release(&exynos_gem_obj->base);
+               goto err_fini_buf;
        }
 
        return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv)
+                                       struct drm_file *filp)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
 
-       obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+       obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-               drm_gem_object_unreference_unlocked(obj);
-
-               /* TODO */
-               return ERR_PTR(-EINVAL);
-       }
-
        return &exynos_gem_obj->buffer->dma_addr;
 }
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv)
+                                       struct drm_file *filp)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
 
-       obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+       obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 
        exynos_gem_obj = to_exynos_gem_obj(obj);
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               DRM_DEBUG_KMS("not support NONCONTIG type.\n");
-               drm_gem_object_unreference_unlocked(obj);
-
-               /* TODO */
-               return;
-       }
-
        drm_gem_object_unreference_unlocked(obj);
 
        /*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                        &args->offset);
 }
 
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+                                                       struct file *filp)
+{
+       struct drm_file *file_priv;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       /* find current process's drm_file from filelist. */
+       list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+               if (file_priv->filp == filp) {
+                       mutex_unlock(&drm_dev->struct_mutex);
+                       return file_priv;
+               }
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       WARN_ON(1);
+
+       return ERR_PTR(-EFAULT);
+}
+
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+       struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
-       unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
+       struct drm_file *file_priv;
+       unsigned long vm_size;
        int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_private_data = obj;
+       vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+       /* restore it to driver's fops. */
+       filp->f_op = fops_get(drm_dev->driver->fops);
+
+       file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+       if (IS_ERR(file_priv))
+               return PTR_ERR(file_priv);
+
+       /* restore it to drm_file. */
+       filp->private_data = file_priv;
 
        update_vm_cache_attr(exynos_gem_obj, vma);
 
-       vm_size = usize = vma->vm_end - vma->vm_start;
+       vm_size = vma->vm_end - vma->vm_start;
 
        /*
         * a buffer contains information about physically contiguous memory
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        if (vm_size > buffer->size)
                return -EINVAL;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               int i = 0;
-
-               if (!buffer->pages)
-                       return -EINVAL;
+       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
+                               buffer->dma_addr, buffer->size,
+                               &buffer->dma_attrs);
+       if (ret < 0) {
+               DRM_ERROR("failed to mmap.\n");
+               return ret;
+       }
 
-               vma->vm_flags |= VM_MIXEDMAP;
+       /*
+        * take a reference to this mapping of the object; it is
+        * dropped by the corresponding vm_close call.
+        */
+       drm_gem_object_reference(obj);
 
-               do {
-                       ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
-                       if (ret) {
-                               DRM_ERROR("failed to remap user space.\n");
-                               return ret;
-                       }
-
-                       uaddr += PAGE_SIZE;
-                       usize -= PAGE_SIZE;
-               } while (usize > 0);
-       } else {
-               /*
-                * get page frame number to physical memory to be mapped
-                * to user space.
-                */
-               pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
-                                                               PAGE_SHIFT;
-
-               DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
-               if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
-                                       vma->vm_page_prot)) {
-                       DRM_ERROR("failed to remap pfn range.\n");
-                       return -EAGAIN;
-               }
-       }
+       mutex_lock(&drm_dev->struct_mutex);
+       drm_vm_open_locked(drm_dev, vma);
+       mutex_unlock(&drm_dev->struct_mutex);
 
        return 0;
 }
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       obj->filp->f_op = &exynos_drm_gem_fops;
-       obj->filp->private_data = obj;
+       /*
+        * Set the specific mapper's fops; it will be restored to
+        * dev->driver->fops by exynos_drm_gem_mmap_buffer.
+        * This is used to call the specific mapper temporarily.
+        */
+       file_priv->filp->f_op = &exynos_drm_gem_fops;
 
-       addr = vm_mmap(obj->filp, 0, args->size,
+       /*
+        * Set the gem object to private_data so that the specific
+        * mapper can get at it; it will be restored to the drm_file
+        * by exynos_drm_gem_mmap_buffer.
+        */
+       file_priv->filp->private_data = obj;
+
+       addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);
 
        drm_gem_object_unreference_unlocked(obj);
 
-       if (IS_ERR((void *)addr))
+       if (IS_ERR((void *)addr)) {
+               file_priv->filp->private_data = file_priv;
                return PTR_ERR((void *)addr);
+       }
 
        args->mapped = addr;
 
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+       struct vm_area_struct *vma_copy;
+
+       vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+       if (!vma_copy)
+               return NULL;
+
+       if (vma->vm_ops && vma->vm_ops->open)
+               vma->vm_ops->open(vma);
+
+       if (vma->vm_file)
+               get_file(vma->vm_file);
+
+       memcpy(vma_copy, vma, sizeof(*vma));
+
+       vma_copy->vm_mm = NULL;
+       vma_copy->vm_next = NULL;
+       vma_copy->vm_prev = NULL;
+
+       return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+       if (!vma)
+               return;
+
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+
+       if (vma->vm_file)
+               fput(vma->vm_file);
+
+       kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+                                               unsigned int npages,
+                                               struct page **pages,
+                                               struct vm_area_struct *vma)
+{
+       int get_npages;
+
+       /* the memory region mmaped with VM_PFNMAP. */
+       if (vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+                       unsigned long pfn;
+                       int ret = follow_pfn(vma, start, &pfn);
+                       if (ret)
+                               return ret;
+
+                       pages[i] = pfn_to_page(pfn);
+               }
+
+               if (i != npages) {
+                       DRM_ERROR("failed to get user_pages.\n");
+                       return -EINVAL;
+               }
+
+               return 0;
+       }
+
+       get_npages = get_user_pages(current, current->mm, start,
+                                       npages, 1, 1, pages, NULL);
+       get_npages = max(get_npages, 0);
+       if (get_npages != npages) {
+               DRM_ERROR("failed to get user_pages.\n");
+               while (get_npages)
+                       put_page(pages[--get_npages]);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+                                       unsigned int npages,
+                                       struct vm_area_struct *vma)
+{
+       if (!vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; i++) {
+                       set_page_dirty_lock(pages[i]);
+
+                       /*
+                        * undo the reference we took when populating
+                        * the table.
+                        */
+                       put_page(pages[i]);
+               }
+       }
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       int nents;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+       if (!nents) {
+               DRM_ERROR("failed to map sgl with dma.\n");
+               mutex_unlock(&drm_dev->struct_mutex);
+               return nents;
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        mutex_lock(&dev->struct_mutex);
 
-       ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+       ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
-               DRM_ERROR("failed to map pages.\n");
+               DRM_ERROR("failed to map a buffer with user.\n");
 
        mutex_unlock(&dev->struct_mutex);
 
index 085b2a5..f11f2af 100644 (file)
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
+ * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
- * @pages: contain all pages to allocated memory region.
- * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicates whether the memory region from userptr is mmapped
+ *     with VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
        void __iomem            *kvaddr;
+       unsigned long           userptr;
        dma_addr_t              dma_addr;
-       struct sg_table         *sgt;
+       struct dma_attrs        dma_attrs;
+       unsigned int            write;
        struct page             **pages;
-       unsigned long           page_size;
+       struct sg_table         *sgt;
        unsigned long           size;
+       bool                    pfnmap;
 };
 
 /*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
  *     or at framebuffer creation.
  * @size: size requested from user, in bytes; this size is aligned
  *     in page units.
+ * @vma: a pointer to vm_area.
  * @flags: indicate memory type of allocated buffer and cache attribute.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
        struct drm_gem_object           base;
        struct exynos_drm_gem_buf       *buffer;
        unsigned long                   size;
+       struct vm_area_struct           *vma;
        unsigned int                    flags;
 };
 
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv);
+                                       struct drm_file *filp);
 
 /*
  * put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
  */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
-                                       struct drm_file *file_priv);
+                                       struct drm_file *filp);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 
+/* map user space memory allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags; we can change the vm attribute to another one here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+       return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+                                               unsigned int npages,
+                                               struct page **pages,
+                                               struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+                                       unsigned int npages,
+                                       struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir);
+
 #endif
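A minimal usage sketch for the userptr helpers above, assuming the 3.7-era mm API (mmap_sem, find_vma) and a caller that has already sized and allocated the pages array; in this series the real caller is the g2d userptr path:

	struct vm_area_struct *vma;
	int ret;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, userptr);
	if (!vma || userptr < vma->vm_start) {
		up_read(&current->mm->mmap_sem);
		return -EFAULT;
	}
	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;

	/* ... build an sg_table and map it, e.g. exynos_gem_map_sgt_with_dma() ... */

	exynos_gem_put_pages_to_userptr(pages, npages, vma);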
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644 (file)
index 0000000..5639353
--- /dev/null
@@ -0,0 +1,1870 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-gsc.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC stands for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ *
+ * M2M operation : supports crop/scale/rotation/csc and so on.
+ * Memory ----> GSC H/W ----> Memory.
+ * Writeback operation : supports cloned screen with FIMD.
+ * FIMD ----> GSC H/W ----> Memory.
+ * Output operation : supports direct display using local path.
+ * Memory ----> GSC H/W ----> FIMD, Mixer.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check use case platform_device_id.
+ * 3. check src/dst size width and height.
+ * 4. add check_prepare api for the right register.
+ * 5. need to add supported list in prop_list.
+ * 6. check prescaler/scaler optimization.
+ */
+
+#define GSC_MAX_DEVS   4
+#define GSC_MAX_SRC            4
+#define GSC_MAX_DST            16
+#define GSC_RESET_TIMEOUT      50
+#define GSC_BUF_STOP   1
+#define GSC_BUF_START  2
+#define GSC_REG_SZ             16
+#define GSC_WIDTH_ITU_709      1280
+#define GSC_SC_UP_MAX_RATIO            65536
+#define GSC_SC_DOWN_RATIO_7_8          74898
+#define GSC_SC_DOWN_RATIO_6_8          87381
+#define GSC_SC_DOWN_RATIO_5_8          104857
+#define GSC_SC_DOWN_RATIO_4_8          131072
+#define GSC_SC_DOWN_RATIO_3_8          174762
+#define GSC_SC_DOWN_RATIO_2_8          262144
+#define GSC_REFRESH_MIN        12
+#define GSC_REFRESH_MAX        60
+#define GSC_CROP_MAX   8192
+#define GSC_CROP_MIN   32
+#define GSC_SCALE_MAX  4224
+#define GSC_SCALE_MIN  32
+#define GSC_COEF_RATIO 7
+#define GSC_COEF_PHASE 9
+#define GSC_COEF_ATTR  16
+#define GSC_COEF_H_8T  8
+#define GSC_COEF_V_4T  4
+#define GSC_COEF_DEPTH 3
+
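The GSC_SC_* thresholds above are a 16.16 fixed-point source/destination ratio, with 65536 meaning 1:1; each boundary works out to (src << 16) / dst, e.g. (8 << 16) / 7 = 74898 for GSC_SC_DOWN_RATIO_7_8 and (8 << 16) / 2 = 262144 for GSC_SC_DOWN_RATIO_2_8. A one-liner stating that convention (a sketch; the driver's own scaler setup that computes these ratios is outside this hunk):

	/* 16.16 fixed-point scale ratio: 65536 == 1:1 */
	static inline u32 gsc_scale_ratio(u32 src, u32 dst)
	{
		return (src << 16) / dst;
	}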
+#define get_gsc_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct gsc_context, ippdrv);
+#define gsc_read(offset)               readl(ctx->regs + (offset))
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: prescaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+       bool    range;
+       u32     pre_shfactor;
+       u32     pre_hratio;
+       u32     pre_vratio;
+       unsigned long main_hratio;
+       unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * see section 49.2 (features) of the user manual.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+       /* tile or rotation */
+       u32     tile_w;
+       u32     tile_h;
+       /* other cases */
+       u32     w;
+       u32     h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc gate clock.
+ * @sc: scaler information.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @rotation: supports rotation of src.
+ * @suspended: qos operations.
+ */
+struct gsc_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct mutex    lock;
+       struct clk      *gsc_clk;
+       struct gsc_scaler       sc;
+       int     id;
+       int     irq;
+       bool    rotation;
+       bool    suspended;
+};
+
+/* 8-tap Filter Coefficient */
+static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0,  0,   0, 128,   0,   0,  0,  0 },
+               { -1,  2,  -6, 127,   7,  -2,  1,  0 },
+               { -1,  4, -12, 125,  16,  -5,  1,  0 },
+               { -1,  5, -15, 120,  25,  -8,  2,  0 },
+               { -1,  6, -18, 114,  35, -10,  3, -1 },
+               { -1,  6, -20, 107,  46, -13,  4, -1 },
+               { -2,  7, -21,  99,  57, -16,  5, -1 },
+               { -1,  6, -20,  89,  68, -18,  5, -1 },
+               { -1,  6, -20,  79,  79, -20,  6, -1 },
+               { -1,  5, -18,  68,  89, -20,  6, -1 },
+               { -1,  5, -16,  57,  99, -21,  7, -2 },
+               { -1,  4, -13,  46, 107, -20,  6, -1 },
+               { -1,  3, -10,  35, 114, -18,  6, -1 },
+               {  0,  2,  -8,  25, 120, -15,  5, -1 },
+               {  0,  1,  -5,  16, 125, -12,  4, -1 },
+               {  0,  1,  -2,   7, 127,  -6,  2, -1 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  3, -8,  14, 111,  13,  -8,  3,  0 },
+               {  2, -6,   7, 112,  21, -10,  3, -1 },
+               {  2, -4,   1, 110,  28, -12,  4, -1 },
+               {  1, -2,  -3, 106,  36, -13,  4, -1 },
+               {  1, -1,  -7, 103,  44, -15,  4, -1 },
+               {  1,  1, -11,  97,  53, -16,  4, -1 },
+               {  0,  2, -13,  91,  61, -16,  4, -1 },
+               {  0,  3, -15,  85,  69, -17,  4, -1 },
+               {  0,  3, -16,  77,  77, -16,  3,  0 },
+               { -1,  4, -17,  69,  85, -15,  3,  0 },
+               { -1,  4, -16,  61,  91, -13,  2,  0 },
+               { -1,  4, -16,  53,  97, -11,  1,  1 },
+               { -1,  4, -15,  44, 103,  -7, -1,  1 },
+               { -1,  4, -13,  36, 106,  -3, -2,  1 },
+               { -1,  4, -12,  28, 110,   1, -4,  2 },
+               { -1,  3, -10,  21, 112,   7, -6,  2 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 2, -11,  25,  96, 25, -11,   2,  0 },
+               { 2, -10,  19,  96, 31, -12,   2,  0 },
+               { 2,  -9,  14,  94, 37, -12,   2,  0 },
+               { 2,  -8,  10,  92, 43, -12,   1,  0 },
+               { 2,  -7,   5,  90, 49, -12,   1,  0 },
+               { 2,  -5,   1,  86, 55, -12,   0,  1 },
+               { 2,  -4,  -2,  82, 61, -11,  -1,  1 },
+               { 1,  -3,  -5,  77, 67,  -9,  -1,  1 },
+               { 1,  -2,  -7,  72, 72,  -7,  -2,  1 },
+               { 1,  -1,  -9,  67, 77,  -5,  -3,  1 },
+               { 1,  -1, -11,  61, 82,  -2,  -4,  2 },
+               { 1,   0, -12,  55, 86,   1,  -5,  2 },
+               { 0,   1, -12,  49, 90,   5,  -7,  2 },
+               { 0,   1, -12,  43, 92,  10,  -8,  2 },
+               { 0,   2, -12,  37, 94,  14,  -9,  2 },
+               { 0,   2, -12,  31, 96,  19, -10,  2 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { -1,  -8, 33,  80, 33,  -8,  -1,  0 },
+               { -1,  -8, 28,  80, 37,  -7,  -2,  1 },
+               {  0,  -8, 24,  79, 41,  -7,  -2,  1 },
+               {  0,  -8, 20,  78, 46,  -6,  -3,  1 },
+               {  0,  -8, 16,  76, 50,  -4,  -3,  1 },
+               {  0,  -7, 13,  74, 54,  -3,  -4,  1 },
+               {  1,  -7, 10,  71, 58,  -1,  -5,  1 },
+               {  1,  -6,  6,  68, 62,   1,  -5,  1 },
+               {  1,  -6,  4,  65, 65,   4,  -6,  1 },
+               {  1,  -5,  1,  62, 68,   6,  -6,  1 },
+               {  1,  -5, -1,  58, 71,  10,  -7,  1 },
+               {  1,  -4, -3,  54, 74,  13,  -7,  0 },
+               {  1,  -3, -4,  50, 76,  16,  -8,  0 },
+               {  1,  -3, -6,  46, 78,  20,  -8,  0 },
+               {  1,  -2, -7,  41, 79,  24,  -8,  0 },
+               {  1,  -2, -7,  37, 80,  28,  -8, -1 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { -3,   0, 35,  64, 35,   0,  -3,  0 },
+               { -3,  -1, 32,  64, 38,   1,  -3,  0 },
+               { -2,  -2, 29,  63, 41,   2,  -3,  0 },
+               { -2,  -3, 27,  63, 43,   4,  -4,  0 },
+               { -2,  -3, 24,  61, 46,   6,  -4,  0 },
+               { -2,  -3, 21,  60, 49,   7,  -4,  0 },
+               { -1,  -4, 19,  59, 51,   9,  -4, -1 },
+               { -1,  -4, 16,  57, 53,  12,  -4, -1 },
+               { -1,  -4, 14,  55, 55,  14,  -4, -1 },
+               { -1,  -4, 12,  53, 57,  16,  -4, -1 },
+               { -1,  -4,  9,  51, 59,  19,  -4, -1 },
+               {  0,  -4,  7,  49, 60,  21,  -3, -2 },
+               {  0,  -4,  6,  46, 61,  24,  -3, -2 },
+               {  0,  -4,  4,  43, 63,  27,  -3, -2 },
+               {  0,  -3,  2,  41, 63,  29,  -2, -2 },
+               {  0,  -3,  1,  38, 64,  32,  -1, -3 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { -1,   8, 33,  48, 33,   8,  -1,  0 },
+               { -1,   7, 31,  49, 35,   9,  -1, -1 },
+               { -1,   6, 30,  49, 36,  10,  -1, -1 },
+               { -1,   5, 28,  48, 38,  12,  -1, -1 },
+               { -1,   4, 26,  48, 39,  13,   0, -1 },
+               { -1,   3, 24,  47, 41,  15,   0, -1 },
+               { -1,   2, 23,  47, 42,  16,   0, -1 },
+               { -1,   2, 21,  45, 43,  18,   1, -1 },
+               { -1,   1, 19,  45, 45,  19,   1, -1 },
+               { -1,   1, 18,  43, 45,  21,   2, -1 },
+               { -1,   0, 16,  42, 47,  23,   2, -1 },
+               { -1,   0, 15,  41, 47,  24,   3, -1 },
+               { -1,   0, 13,  39, 48,  26,   4, -1 },
+               { -1,  -1, 12,  38, 48,  28,   5, -1 },
+               { -1,  -1, 10,  36, 49,  30,   6, -1 },
+               { -1,  -1,  9,  35, 49,  31,   7, -1 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               {  2,  13, 30,  38, 30,  13,   2,  0 },
+               {  2,  12, 29,  38, 30,  14,   3,  0 },
+               {  2,  11, 28,  38, 31,  15,   3,  0 },
+               {  2,  10, 26,  38, 32,  16,   4,  0 },
+               {  1,  10, 26,  37, 33,  17,   4,  0 },
+               {  1,   9, 24,  37, 34,  18,   5,  0 },
+               {  1,   8, 24,  37, 34,  19,   5,  0 },
+               {  1,   7, 22,  36, 35,  20,   6,  1 },
+               {  1,   6, 21,  36, 36,  21,   6,  1 },
+               {  1,   6, 20,  35, 36,  22,   7,  1 },
+               {  0,   5, 19,  34, 37,  24,   8,  1 },
+               {  0,   5, 18,  34, 37,  24,   9,  1 },
+               {  0,   4, 17,  33, 37,  26,  10,  1 },
+               {  0,   4, 16,  32, 38,  26,  10,  2 },
+               {  0,   3, 15,  31, 38,  28,  11,  2 },
+               {  0,   3, 14,  30, 38,  29,  12,  2 }
+       }
+};
+
+/* 4-tap Filter Coefficient */
+static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
+       {       /* Ratio <= 65536 (~8:8) */
+               {  0, 128,   0,  0 },
+               { -4, 127,   5,  0 },
+               { -6, 124,  11, -1 },
+               { -8, 118,  19, -1 },
+               { -8, 111,  27, -2 },
+               { -8, 102,  37, -3 },
+               { -8,  92,  48, -4 },
+               { -7,  81,  59, -5 },
+               { -6,  70,  70, -6 },
+               { -5,  59,  81, -7 },
+               { -4,  48,  92, -8 },
+               { -3,  37, 102, -8 },
+               { -2,  27, 111, -8 },
+               { -1,  19, 118, -8 },
+               { -1,  11, 124, -6 },
+               {  0,   5, 127, -4 }
+       }, {    /* 65536 < Ratio <= 74898 (~8:7) */
+               {  8, 112,   8,  0 },
+               {  4, 111,  14, -1 },
+               {  1, 109,  20, -2 },
+               { -2, 105,  27, -2 },
+               { -3, 100,  34, -3 },
+               { -5,  93,  43, -3 },
+               { -5,  86,  51, -4 },
+               { -5,  77,  60, -4 },
+               { -5,  69,  69, -5 },
+               { -4,  60,  77, -5 },
+               { -4,  51,  86, -5 },
+               { -3,  43,  93, -5 },
+               { -3,  34, 100, -3 },
+               { -2,  27, 105, -2 },
+               { -2,  20, 109,  1 },
+               { -1,  14, 111,  4 }
+       }, {    /* 74898 < Ratio <= 87381 (~8:6) */
+               { 16,  96,  16,  0 },
+               { 12,  97,  21, -2 },
+               {  8,  96,  26, -2 },
+               {  5,  93,  32, -2 },
+               {  2,  89,  39, -2 },
+               {  0,  84,  46, -2 },
+               { -1,  79,  53, -3 },
+               { -2,  73,  59, -2 },
+               { -2,  66,  66, -2 },
+               { -2,  59,  73, -2 },
+               { -3,  53,  79, -1 },
+               { -2,  46,  84,  0 },
+               { -2,  39,  89,  2 },
+               { -2,  32,  93,  5 },
+               { -2,  26,  96,  8 },
+               { -2,  21,  97, 12 }
+       }, {    /* 87381 < Ratio <= 104857 (~8:5) */
+               { 22,  84,  22,  0 },
+               { 18,  85,  26, -1 },
+               { 14,  84,  31, -1 },
+               { 11,  82,  36, -1 },
+               {  8,  79,  42, -1 },
+               {  6,  76,  47, -1 },
+               {  4,  72,  52,  0 },
+               {  2,  68,  58,  0 },
+               {  1,  63,  63,  1 },
+               {  0,  58,  68,  2 },
+               {  0,  52,  72,  4 },
+               { -1,  47,  76,  6 },
+               { -1,  42,  79,  8 },
+               { -1,  36,  82, 11 },
+               { -1,  31,  84, 14 },
+               { -1,  26,  85, 18 }
+       }, {    /* 104857 < Ratio <= 131072 (~8:4) */
+               { 26,  76,  26,  0 },
+               { 22,  76,  30,  0 },
+               { 19,  75,  34,  0 },
+               { 16,  73,  38,  1 },
+               { 13,  71,  43,  1 },
+               { 10,  69,  47,  2 },
+               {  8,  66,  51,  3 },
+               {  6,  63,  55,  4 },
+               {  5,  59,  59,  5 },
+               {  4,  55,  63,  6 },
+               {  3,  51,  66,  8 },
+               {  2,  47,  69, 10 },
+               {  1,  43,  71, 13 },
+               {  1,  38,  73, 16 },
+               {  0,  34,  75, 19 },
+               {  0,  30,  76, 22 }
+       }, {    /* 131072 < Ratio <= 174762 (~8:3) */
+               { 29,  70,  29,  0 },
+               { 26,  68,  32,  2 },
+               { 23,  67,  36,  2 },
+               { 20,  66,  39,  3 },
+               { 17,  65,  43,  3 },
+               { 15,  63,  46,  4 },
+               { 12,  61,  50,  5 },
+               { 10,  58,  53,  7 },
+               {  8,  56,  56,  8 },
+               {  7,  53,  58, 10 },
+               {  5,  50,  61, 12 },
+               {  4,  46,  63, 15 },
+               {  3,  43,  65, 17 },
+               {  3,  39,  66, 20 },
+               {  2,  36,  67, 23 },
+               {  2,  32,  68, 26 }
+       }, {    /* 174762 < Ratio <= 262144 (~8:2) */
+               { 32,  64,  32,  0 },
+               { 28,  63,  34,  3 },
+               { 25,  62,  37,  4 },
+               { 22,  62,  40,  4 },
+               { 19,  61,  43,  5 },
+               { 17,  59,  46,  6 },
+               { 15,  58,  48,  7 },
+               { 13,  55,  51,  9 },
+               { 11,  53,  53, 11 },
+               {  9,  51,  55, 13 },
+               {  7,  48,  58, 15 },
+               {  6,  46,  59, 17 },
+               {  5,  43,  61, 19 },
+               {  4,  40,  62, 22 },
+               {  4,  37,  62, 25 },
+               {  3,  34,  63, 28 }
+       }
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+       u32 cfg;
+       int count = GSC_RESET_TIMEOUT;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* s/w reset */
+       cfg = (GSC_SW_RESET_SRESET);
+       gsc_write(cfg, GSC_SW_RESET);
+
+       /* wait s/w reset complete */
+       while (count--) {
+               cfg = gsc_read(GSC_SW_RESET);
+               if (!cfg)
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       if (cfg) {
+               DRM_ERROR("failed to reset gsc h/w.\n");
+               return -EBUSY;
+       }
+
+       /* reset sequence */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_IN_BASE_ADDR_MASK |
+               GSC_IN_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       cfg |= (GSC_OUT_BASE_ADDR_MASK |
+               GSC_OUT_BASE_ADDR_PINGPONG(0));
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+       u32 gscblk_cfg;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+       if (enable)
+               gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+                               GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+                               GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+       else
+               gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+       writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+               bool overflow, bool done)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]done[%d]\n", __func__,
+                       enable, overflow, done);
+
+       cfg = gsc_read(GSC_IRQ);
+       cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+       if (enable)
+               cfg |= GSC_IRQ_ENABLE;
+       else
+               cfg &= ~GSC_IRQ_ENABLE;
+
+       if (overflow)
+               cfg &= ~GSC_IRQ_OR_MASK;
+       else
+               cfg |= GSC_IRQ_OR_MASK;
+
+       if (done)
+               cfg &= ~GSC_IRQ_FRMDONE_MASK;
+       else
+               cfg |= GSC_IRQ_FRMDONE_MASK;
+
+       gsc_write(cfg, GSC_IRQ);
+}
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+                GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+                GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
+                GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_IN_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_IN_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_ORDER_LSB_Y |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_IN_YUV422_1P |
+                       GSC_IN_YUV422_1P_OEDER_LSB_C |
+                       GSC_IN_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+               cfg |= GSC_IN_YUV422_3P;
+               break;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_IN_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+                       GSC_IN_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       ctx->rotation = cfg &
+               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+               GSC_SRCIMG_OFFSET_Y(img_pos.y));
+       gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+       /* cropped size */
+       cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+               GSC_CROPPED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_CROPPED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_SRCIMG_SIZE);
+       cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+               GSC_SRCIMG_WIDTH_MASK);
+
+       cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+               GSC_SRCIMG_HEIGHT(sz->vsize));
+
+       gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       if (pos->w >= GSC_WIDTH_ITU_709)
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_HD_NARROW;
+       else
+               if (sc->range)
+                       cfg |= GSC_IN_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_IN_RGB_SD_NARROW;
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Update this buffer's mask bit on all three plane registers: a set
+        * bit (dequeue) hides the slot from the DMA engine, a clear bit
+        * (enqueue) exposes it.
+        */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+       return 0;
+}
+
+static int gsc_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > GSC_MAX_SRC) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+       .set_fmt = gsc_src_set_fmt,
+       .set_transf = gsc_src_set_transf,
+       .set_size = gsc_src_set_size,
+       .set_addr = gsc_src_set_addr,
+};
+
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+                GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+                GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
+                GSC_OUT_GLOBAL_ALPHA_MASK);
+
+       switch (fmt) {
+       case DRM_FORMAT_RGB565:
+               cfg |= GSC_OUT_RGB565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               cfg |= GSC_OUT_XRGB8888;
+               break;
+       case DRM_FORMAT_BGRX8888:
+               cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
+               break;
+       case DRM_FORMAT_YUYV:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_YVYU:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_UYVY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CBCR);
+               break;
+       case DRM_FORMAT_VYUY:
+               cfg |= (GSC_OUT_YUV422_1P |
+                       GSC_OUT_YUV422_1P_OEDER_LSB_C |
+                       GSC_OUT_CHROMA_ORDER_CRCB);
+               break;
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               cfg |= GSC_OUT_YUV420_3P;
+               break;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV16:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+                       GSC_OUT_YUV420_2P);
+               break;
+       case DRM_FORMAT_NV12MT:
+               cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
+static int gsc_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+               degree, flip);
+
+       cfg = gsc_read(GSC_IN_CON);
+       cfg &= ~GSC_IN_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_0:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_YFLIP;
+               break;
+       case EXYNOS_DRM_DEGREE_90:
+               if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+                       cfg |= GSC_IN_ROT_90_XFLIP;
+               else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+                       cfg |= GSC_IN_ROT_90_YFLIP;
+               else
+                       cfg |= GSC_IN_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               cfg |= GSC_IN_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               cfg |= GSC_IN_ROT_270;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+               return -EINVAL;
+       }
+
+       gsc_write(cfg, GSC_IN_CON);
+
+       ctx->rotation = (cfg & (GSC_IN_ROT_90 | GSC_IN_ROT_270)) ? 1 : 0;
+       *swap = ctx->rotation;
+
+       return 0;
+}
+
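+/*
+ * Pick the power-of-two pre-scale ratio (1, 2 or 4) for one axis; ratios
+ * of 8:1 or more cannot be pre-scaled and are rejected. For example, a
+ * 1920 pixel source scaled down to 400 pixels pre-scales by 4.
+ */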
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+{
+       DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+       if (src >= dst * 8) {
+               DRM_ERROR("failed to make ratio and shift.\n");
+               return -EINVAL;
+       } else if (src >= dst * 4) {
+               *ratio = 4;
+       } else if (src >= dst * 2) {
+               *ratio = 2;
+       } else {
+               *ratio = 1;
+       }
+
+       return 0;
+}
+
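+/*
+ * The prescaler shift factor is log2(hratio * vratio): 4x4 gives 4,
+ * 4x2 and 2x4 give 3, 4x1/1x4/2x2 give 2, 2x1/1x2 give 1, and the
+ * 1x1 pass-through case gives 0.
+ */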
+static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
+{
+       if (hratio == 4 && vratio == 4)
+               *shfactor = 4;
+       else if ((hratio == 4 && vratio == 2) ||
+                (hratio == 2 && vratio == 4))
+               *shfactor = 3;
+       else if ((hratio == 4 && vratio == 1) ||
+                (hratio == 1 && vratio == 4) ||
+                (hratio == 2 && vratio == 2))
+               *shfactor = 2;
+       else if (hratio == 1 && vratio == 1)
+               *shfactor = 0;
+       else
+               *shfactor = 1;
+}
+
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+               struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       u32 cfg;
+       u32 src_w, src_h, dst_w, dst_h;
+       int ret = 0;
+
+       src_w = src->w;
+       src_h = src->h;
+
+       if (ctx->rotation) {
+               dst_w = dst->h;
+               dst_h = dst->w;
+       } else {
+               dst_w = dst->w;
+               dst_h = dst->h;
+       }
+
+       ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+               return ret;
+       }
+
+       ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+       if (ret) {
+               dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
+               __func__, sc->pre_hratio, sc->pre_vratio);
+
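+       /*
+        * The main scaler ratios are 16.16 fixed point; e.g. scaling 1920
+        * down to 1280 is a ratio of 1.5, stored as 0x18000.
+        */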
+       sc->main_hratio = (src_w << 16) / dst_w;
+       sc->main_vratio = (src_h << 16) / dst_h;
+
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
+               &sc->pre_shfactor);
+
+       DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+               sc->pre_shfactor);
+
+       cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+               GSC_PRESC_H_RATIO(sc->pre_hratio) |
+               GSC_PRESC_V_RATIO(sc->pre_vratio));
+       gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+       return ret;
+}
+
+static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
+{
+       int i, j, k, sc_ratio;
+
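+       /*
+        * Select one of seven filter coefficient banks by how steep the
+        * horizontal downscale is: bank 0 covers everything up to the
+        * maximum upscale ratio, banks 1-5 the 7/8 down to 3/8 steps,
+        * and bank 6 anything steeper.
+        */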
+       if (main_hratio <= GSC_SC_UP_MAX_RATIO)
+               sc_ratio = 0;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
+               sc_ratio = 1;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
+               sc_ratio = 2;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
+               sc_ratio = 3;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
+               sc_ratio = 4;
+       else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
+               sc_ratio = 5;
+       else
+               sc_ratio = 6;
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_H_8T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(h_coef_8t[sc_ratio][i][j],
+                                       GSC_HCOEF(i, j, k));
+}
+
+static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
+{
+       int i, j, k, sc_ratio;
+
+       if (main_vratio <= GSC_SC_UP_MAX_RATIO)
+               sc_ratio = 0;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
+               sc_ratio = 1;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
+               sc_ratio = 2;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
+               sc_ratio = 3;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
+               sc_ratio = 4;
+       else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
+               sc_ratio = 5;
+       else
+               sc_ratio = 6;
+
+       for (i = 0; i < GSC_COEF_PHASE; i++)
+               for (j = 0; j < GSC_COEF_V_4T; j++)
+                       for (k = 0; k < GSC_COEF_DEPTH; k++)
+                               gsc_write(v_coef_4t[sc_ratio][i][j],
+                                       GSC_VCOEF(i, j, k));
+}
+
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+               __func__, sc->main_hratio, sc->main_vratio);
+
+       gsc_set_h_coef(ctx, sc->main_hratio);
+       cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
+       gsc_write(cfg, GSC_MAIN_H_RATIO);
+
+       gsc_set_v_coef(ctx, sc->main_vratio);
+       cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
+       gsc_write(cfg, GSC_MAIN_V_RATIO);
+}
+
+static int gsc_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_pos img_pos = *pos;
+       struct gsc_scaler *sc = &ctx->sc;
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+               __func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+       if (swap) {
+               img_pos.w = pos->h;
+               img_pos.h = pos->w;
+       }
+
+       /* pixel offset */
+       cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
+               GSC_DSTIMG_OFFSET_Y(pos->y));
+       gsc_write(cfg, GSC_DSTIMG_OFFSET);
+
+       /* scaled size */
+       cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
+       gsc_write(cfg, GSC_SCALED_SIZE);
+
+       DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+               __func__, sz->hsize, sz->vsize);
+
+       /* original size */
+       cfg = gsc_read(GSC_DSTIMG_SIZE);
+       cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
+               GSC_DSTIMG_WIDTH_MASK);
+       cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
+               GSC_DSTIMG_HEIGHT(sz->vsize));
+       gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+       cfg = gsc_read(GSC_OUT_CON);
+       cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+       DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
+               __func__, pos->w, sc->range);
+
+       if (pos->w >= GSC_WIDTH_ITU_709) {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_HD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_HD_NARROW;
+       } else {
+               if (sc->range)
+                       cfg |= GSC_OUT_RGB_SD_WIDE;
+               else
+                       cfg |= GSC_OUT_RGB_SD_NARROW;
+       }
+
+       gsc_write(cfg, GSC_OUT_CON);
+
+       return 0;
+}
+
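+/*
+ * Count the destination buffer slots that are still enqueued, i.e. whose
+ * mask bit is clear in GSC_OUT_BASE_ADDR_Y_MASK.
+ */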
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+       u32 cfg, i, buf_num = GSC_REG_SZ;
+       u32 mask = 0x00000001;
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+       for (i = 0; i < GSC_REG_SZ; i++)
+               if (cfg & (mask << i))
+                       buf_num--;
+
+       DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+       return buf_num;
+}
+
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       bool masked;
+       u32 cfg;
+       u32 mask = 0x00000001 << buf_id;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
+               buf_id, buf_type);
+
+       mutex_lock(&ctx->lock);
+
+       /* mask register set */
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               masked = false;
+               break;
+       case IPP_BUF_DEQUEUE:
+               masked = true;
+               break;
+       default:
+               dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       /* update this buffer's mask bit on all three plane registers */
+       cfg &= ~mask;
+       cfg |= masked << buf_id;
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       /* enable the frame-done interrupt once enough buffers are queued */
+       if (buf_type == IPP_BUF_ENQUEUE &&
+           gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+               gsc_handle_irq(ctx, true, false, true);
+
+       /* disable the interrupt again when the queue has drained */
+       if (buf_type == IPP_BUF_DEQUEUE &&
+           gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+               gsc_handle_irq(ctx, false, false, true);
+
+err_unlock:
+       mutex_unlock(&ctx->lock);
+       return ret;
+}
+
+static int gsc_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EFAULT;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
+               property->prop_id, buf_id, buf_type);
+
+       if (buf_id > GSC_MAX_DST) {
+               dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+               return -EINVAL;
+       }
+
+       /* address register set */
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+                       GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+                       GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+                       GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       case IPP_BUF_DEQUEUE:
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
+               gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
+               break;
+       default:
+               /* bypass */
+               break;
+       }
+
+       return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
+}
+
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+       .set_fmt = gsc_dst_set_fmt,
+       .set_transf = gsc_dst_set_transf,
+       .set_size = gsc_dst_set_size,
+       .set_addr = gsc_dst_set_addr,
+};
+
+static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       if (enable) {
+               clk_enable(ctx->gsc_clk);
+               ctx->suspended = false;
+       } else {
+               clk_disable(ctx->gsc_clk);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
+static int gsc_get_src_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_SRC;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+       curr_index = GSC_IN_CURR_GET_INDEX(cfg);
+
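+       /*
+        * Scan forward from the hardware's current slot: the first unmasked
+        * (still enqueued) buffer is taken as the one just completed.
+        */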
+       for (i = curr_index; i < GSC_MAX_SRC; i++) {
+               if (!((cfg >> i) & 0x1)) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_SRC) {
+               DRM_ERROR("failed to get in buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
+
+static int gsc_get_dst_buf_index(struct gsc_context *ctx)
+{
+       u32 cfg, curr_index, i;
+       u32 buf_id = GSC_MAX_DST;
+       int ret;
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+       curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
+
+       for (i = curr_index; i < GSC_MAX_DST; i++) {
+               if (!((cfg >> i) & 0x1)) {
+                       buf_id = i;
+                       break;
+               }
+       }
+
+       if (buf_id == GSC_MAX_DST) {
+               DRM_ERROR("failed to get out buffer index.\n");
+               return -EINVAL;
+       }
+
+       ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
+       if (ret < 0) {
+               DRM_ERROR("failed to dequeue.\n");
+               return ret;
+       }
+
+       DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
+               curr_index, buf_id);
+
+       return buf_id;
+}
+
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+       struct gsc_context *ctx = dev_id;
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work =
+               c_node->event_work;
+       u32 status;
+       int buf_id[EXYNOS_DRM_OPS_MAX];
+
+       DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+       status = gsc_read(GSC_IRQ);
+       if (status & GSC_IRQ_STATUS_OR_IRQ) {
+               dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
+                       ctx->id, status);
+               return IRQ_NONE;
+       }
+
+       if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+               dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
+                       ctx->id, status);
+
+               buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
+                       return IRQ_HANDLED;
+
+               buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
+               if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
+                       return IRQ_HANDLED;
+
+               DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
+                       buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
+
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
+                       buf_id[EXYNOS_DRM_OPS_SRC];
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->writeback = 1;
+       prop_list->refresh_min = GSC_REFRESH_MIN;
+       prop_list->refresh_max = GSC_REFRESH_MAX;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 1;
+       prop_list->crop = 1;
+       prop_list->crop_max.hsize = GSC_CROP_MAX;
+       prop_list->crop_max.vsize = GSC_CROP_MAX;
+       prop_list->crop_min.hsize = GSC_CROP_MIN;
+       prop_list->crop_min.vsize = GSC_CROP_MIN;
+       prop_list->scale = 1;
+       prop_list->scale_max.hsize = GSC_SCALE_MAX;
+       prop_list->scale_max.vsize = GSC_SCALE_MAX;
+       prop_list->scale_min.hsize = GSC_SCALE_MIN;
+       prop_list->scale_min.vsize = GSC_SCALE_MIN;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+       case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
+static int gsc_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos *pos;
+       struct drm_exynos_sz *sz;
+       bool swap;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               /* check for flip */
+               if (!gsc_check_drm_flip(config->flip)) {
+                       DRM_ERROR("invalid flip.\n");
+                       goto err_property;
+               }
+
+               /* check for degree */
+               switch (config->degree) {
+               case EXYNOS_DRM_DEGREE_90:
+               case EXYNOS_DRM_DEGREE_270:
+                       swap = true;
+                       break;
+               case EXYNOS_DRM_DEGREE_0:
+               case EXYNOS_DRM_DEGREE_180:
+                       swap = false;
+                       break;
+               default:
+                       DRM_ERROR("invalid degree.\n");
+                       goto err_property;
+               }
+
+               /* check for buffer bound */
+               if ((pos->x + pos->w > sz->hsize) ||
+                       (pos->y + pos->h > sz->vsize)) {
+                       DRM_ERROR("out of buf bound.\n");
+                       goto err_property;
+               }
+
+               /* check for crop */
+               if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
+                       if (swap) {
+                               if ((pos->h < pp->crop_min.hsize) ||
+                                       (sz->vsize > pp->crop_max.hsize) ||
+                                       (pos->w < pp->crop_min.vsize) ||
+                                       (sz->hsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->crop_min.hsize) ||
+                                       (sz->hsize > pp->crop_max.hsize) ||
+                                       (pos->h < pp->crop_min.vsize) ||
+                                       (sz->vsize > pp->crop_max.vsize)) {
+                                       DRM_ERROR("out of crop size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+
+               /* check for scale */
+               if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
+                       if (swap) {
+                               if ((pos->h < pp->scale_min.hsize) ||
+                                       (sz->vsize > pp->scale_max.hsize) ||
+                                       (pos->w < pp->scale_min.vsize) ||
+                                       (sz->hsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       } else {
+                               if ((pos->w < pp->scale_min.hsize) ||
+                                       (sz->hsize > pp->scale_max.hsize) ||
+                                       (pos->h < pp->scale_min.vsize) ||
+                                       (sz->vsize > pp->scale_max.vsize)) {
+                                       DRM_ERROR("out of scale size.\n");
+                                       goto err_property;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+
+err_property:
+       for_each_ipp_ops(i) {
+               if ((i == EXYNOS_DRM_OPS_SRC) &&
+                       (property->cmd == IPP_CMD_WB))
+                       continue;
+
+               config = &property->config[i];
+               pos = &config->pos;
+               sz = &config->sz;
+
+               DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
+                       i ? "dst" : "src", config->flip, config->degree,
+                       pos->x, pos->y, pos->w, pos->h,
+                       sz->hsize, sz->vsize);
+       }
+
+       return -EINVAL;
+}
+
+
+static int gsc_ippdrv_reset(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct gsc_scaler *sc = &ctx->sc;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* reset h/w block */
+       ret = gsc_sw_reset(ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to reset hardware.\n");
+               return ret;
+       }
+
+       /* scaler setting */
+       memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+       sc->range = true;
+
+       return 0;
+}
+
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_property *property;
+       struct drm_exynos_ipp_config *config;
+       struct drm_exynos_pos   img_pos[EXYNOS_DRM_OPS_MAX];
+       struct drm_exynos_ipp_set_wb set_wb;
+       u32 cfg;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       if (!c_node) {
+               DRM_ERROR("failed to get c_node.\n");
+               return -EINVAL;
+       }
+
+       property = &c_node->property;
+       if (!property) {
+               DRM_ERROR("failed to get property.\n");
+               return -EINVAL;
+       }
+
+       gsc_handle_irq(ctx, true, false, true);
+
+       for_each_ipp_ops(i) {
+               config = &property->config[i];
+               img_pos[i] = config->pos;
+       }
+
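+       /*
+        * Route the data path for this command: M2M runs memory-to-memory
+        * in one-shot mode, WB captures FIMD writeback as the local input,
+        * and OUTPUT feeds a memory source into the output path.
+        */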
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* enable one shot */
+               cfg = gsc_read(GSC_ENABLE);
+               cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
+                       GSC_ENABLE_CLK_GATE_MODE_MASK);
+               cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
+               gsc_write(cfg, GSC_ENABLE);
+
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_WB:
+               set_wb.enable = 1;
+               set_wb.refresh = property->refresh_rate;
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+
+               /* src local path */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       case IPP_CMD_OUTPUT:
+               /* src dma memory */
+               cfg = gsc_read(GSC_IN_CON);
+               cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
+               cfg |= GSC_IN_PATH_MEMORY;
+               gsc_write(cfg, GSC_IN_CON);
+
+               /* dst dma memory */
+               cfg = gsc_read(GSC_OUT_CON);
+               cfg |= GSC_OUT_PATH_MEMORY;
+               gsc_write(cfg, GSC_OUT_CON);
+               break;
+       default:
+               ret = -EINVAL;
+               dev_err(dev, "invalid operation.\n");
+               return ret;
+       }
+
+       ret = gsc_set_prescaler(ctx, &ctx->sc,
+               &img_pos[EXYNOS_DRM_OPS_SRC],
+               &img_pos[EXYNOS_DRM_OPS_DST]);
+       if (ret) {
+               dev_err(dev, "failed to set prescaler.\n");
+               return ret;
+       }
+
+       gsc_set_scaler(ctx, &ctx->sc);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg |= GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+
+       return 0;
+}
+
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct drm_exynos_ipp_set_wb set_wb = {0, 0};
+       u32 cfg;
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+       switch (cmd) {
+       case IPP_CMD_M2M:
+               /* bypass */
+               break;
+       case IPP_CMD_WB:
+               gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
+               exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
+               break;
+       case IPP_CMD_OUTPUT:
+       default:
+               dev_err(dev, "invalid operation.\n");
+               break;
+       }
+
+       gsc_handle_irq(ctx, false, false, true);
+
+       /* reset sequence */
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+       gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+       cfg = gsc_read(GSC_ENABLE);
+       cfg &= ~GSC_ENABLE_ON;
+       gsc_write(cfg, GSC_ENABLE);
+}
+
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx;
+       struct resource *res;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       /* clock control */
+       ctx->gsc_clk = clk_get(dev, "gscl");
+       if (IS_ERR(ctx->gsc_clk)) {
+               dev_err(dev, "failed to get gsc clock.\n");
+               ret = PTR_ERR(ctx->gsc_clk);
+               goto err_ctx;
+       }
+
+       /* resource memory */
+       ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!ctx->regs_res) {
+               dev_err(dev, "failed to find registers.\n");
+               ret = -ENOENT;
+               goto err_clk;
+       }
+
+       ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
+       if (!ctx->regs) {
+               dev_err(dev, "failed to map registers.\n");
+               ret = -ENXIO;
+               goto err_clk;
+       }
+
+       /* resource irq */
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!res) {
+               dev_err(dev, "failed to request irq resource.\n");
+               ret = -ENOENT;
+               goto err_get_regs;
+       }
+
+       ctx->irq = res->start;
+       ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+               IRQF_ONESHOT, "drm_gsc", ctx);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq.\n");
+               goto err_get_regs;
+       }
+
+       /* context initialization */
+       ctx->id = pdev->id;
+
+       ippdrv = &ctx->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+       ippdrv->check_property = gsc_ippdrv_check_property;
+       ippdrv->reset = gsc_ippdrv_reset;
+       ippdrv->start = gsc_ippdrv_start;
+       ippdrv->stop = gsc_ippdrv_stop;
+       ret = gsc_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_get_irq;
+       }
+
+       DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
+               (int)ippdrv);
+
+       mutex_init(&ctx->lock);
+       platform_set_drvdata(pdev, ctx);
+
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm gsc device.\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+err_get_irq:
+       free_irq(ctx->irq, ctx);
+err_get_regs:
+       devm_iounmap(dev, ctx->regs);
+err_clk:
+       clk_put(ctx->gsc_clk);
+err_ctx:
+       devm_kfree(dev, ctx);
+       return ret;
+}
+
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct gsc_context *ctx = get_gsc_context(dev);
+       struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+       mutex_destroy(&ctx->lock);
+
+       pm_runtime_set_suspended(dev);
+       pm_runtime_disable(dev);
+
+       free_irq(ctx->irq, ctx);
+       devm_iounmap(dev, ctx->regs);
+
+       clk_put(ctx->gsc_clk);
+
+       devm_kfree(dev, ctx);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gsc_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       if (!pm_runtime_suspended(dev))
+               return gsc_clk_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int gsc_runtime_suspend(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return gsc_clk_ctrl(ctx, false);
+}
+
+static int gsc_runtime_resume(struct device *dev)
+{
+       struct gsc_context *ctx = get_gsc_context(dev);
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+       return gsc_clk_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops gsc_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+       SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+struct platform_driver gsc_driver = {
+       .probe          = gsc_probe,
+       .remove         = __devexit_p(gsc_remove),
+       .driver         = {
+               .name   = "exynos-drm-gsc",
+               .owner  = THIS_MODULE,
+               .pm     = &gsc_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644 (file)
index 0000000..b3c3bc6
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/*
+ * TODO
+ * FIMD output interface notifier callback.
+ * Mixer output interface notifier callback.
+ */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
index c3b9e2b..55793c4 100644 (file)
@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)    container_of(subdrv,\
                                        struct drm_hdmi_context, subdrv);
 
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer through context.
 * These should be initialized by the respective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
        bool    enabled[MIXER_WIN_NR];
 };
 
+int exynos_platform_device_hdmi_register(void)
+{
+       if (exynos_drm_hdmi_pdev)
+               return -EEXIST;
+
+       exynos_drm_hdmi_pdev = platform_device_register_simple(
+                       "exynos-drm-hdmi", -1, NULL, 0);
+       if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+               return PTR_ERR(exynos_drm_hdmi_pdev);
+
+       return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+       if (exynos_drm_hdmi_pdev)
+               platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
        if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
                return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (mixer_ops && mixer_ops->wait_for_vblank)
+               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
                                struct drm_connector *connector,
                                const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
        .apply = drm_hdmi_apply,
        .enable_vblank = drm_hdmi_enable_vblank,
        .disable_vblank = drm_hdmi_disable_vblank,
+       .wait_for_vblank = drm_hdmi_wait_for_vblank,
        .mode_fixup = drm_hdmi_mode_fixup,
        .mode_set = drm_hdmi_mode_set,
        .get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
        ctx->enabled[win] = false;
 }
 
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-       struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-       DRM_DEBUG_KMS("%s\n", __FILE__);
-
-       if (mixer_ops && mixer_ops->wait_for_vblank)
-               mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
        .mode_set = drm_mixer_mode_set,
        .commit = drm_mixer_commit,
        .disable = drm_mixer_disable,
-       .wait_for_vblank = drm_mixer_wait_for_vblank,
 };
 
 static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
        ctx->hdmi_ctx->drm_dev = drm_dev;
        ctx->mixer_ctx->drm_dev = drm_dev;
 
+       if (mixer_ops && mixer_ops->iommu_on)
+               mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
        return 0;
 }
 
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       struct drm_hdmi_context *ctx;
+       struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+       ctx = get_ctx_from_subdrv(subdrv);
+
+       if (mixer_ops && mixer_ops->iommu_on)
+               mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
        subdrv->dev = dev;
        subdrv->manager = &hdmi_manager;
        subdrv->probe = hdmi_subdrv_probe;
+       subdrv->remove = hdmi_subdrv_remove;
 
        platform_set_drvdata(pdev, subdrv);
 
index 2da5ffd..fcc3093 100644 (file)
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
 
 struct exynos_mixer_ops {
        /* manager */
+       int (*iommu_on)(void *ctx, bool enable);
        int (*enable_vblank)(void *ctx, int pipe);
        void (*disable_vblank)(void *ctx);
+       void (*wait_for_vblank)(void *ctx);
        void (*dpms)(void *ctx, int mode);
 
        /* overlay */
-       void (*wait_for_vblank)(void *ctx);
        void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
        void (*win_commit)(void *ctx, int zpos);
        void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644 (file)
index 0000000..2482b7f
--- /dev/null
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+       struct dma_iommu_mapping *mapping = NULL;
+       struct exynos_drm_private *priv = drm_dev->dev_private;
+       struct device *dev = drm_dev->dev;
+
+       if (!priv->da_start)
+               priv->da_start = EXYNOS_DEV_ADDR_START;
+       if (!priv->da_space_size)
+               priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+       if (!priv->da_space_order)
+               priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+       mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+                                               priv->da_space_size,
+                                               priv->da_space_order);
+       if (IS_ERR(mapping))
+               return PTR_ERR(mapping);
+
+       dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+                                       GFP_KERNEL);
+       if (!dev->dma_parms) {
+               arm_iommu_release_mapping(mapping);
+               return -ENOMEM;
+       }
+       dma_set_max_seg_size(dev, 0xffffffffu);
+       dev->archdata.mapping = mapping;
+
+       return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * if mapping->kref becomes 0 then all things related to iommu mapping
+ * will be released
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+       struct device *dev = drm_dev->dev;
+
+       arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device- attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attach
+ *
+ * This function should be called by sub drivers to attach it to iommu
+ * mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev)
+{
+       struct device *dev = drm_dev->dev;
+       int ret;
+
+       if (!dev->archdata.mapping) {
+               DRM_ERROR("iommu_mapping is null.\n");
+               return -EFAULT;
+       }
+
+       subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+                                       sizeof(*subdrv_dev->dma_parms),
+                                       GFP_KERNEL);
+       if (!subdrv_dev->dma_parms)
+               return -ENOMEM;
+       dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+       ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("failed to attach iommu.\n");
+               return ret;
+       }
+
+       /*
+        * Set dma_ops to drm_device just one time.
+        *
+        * The dma mapping api needs a device object and is used to
+        * allocate physical memory and map it through the iommu table.
+        * Once the iommu attach has succeeded, the sub driver carries
+        * the iommu dma_ops, and all sub drivers share the same dma_ops.
+        */
+       if (!dev->archdata.dma_ops)
+               dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+       return 0;
+}
+
+/*
+ * drm_iommu_detach_device -detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach it from iommu
+ * mapping
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev)
+{
+       struct device *dev = drm_dev->dev;
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       if (!mapping || !mapping->domain)
+               return;
+
+       iommu_detach_device(mapping->domain, subdrv_dev);
+       drm_release_iommu_mapping(drm_dev);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644 (file)
index 0000000..18a0ca1
--- /dev/null
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
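+/*
+ * Defaults for the device virtual address space when the platform does
+ * not override them: a 1 GiB window starting at 0x20000000, managed with
+ * an allocation order of 4.
+ */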
+#define EXYNOS_DEV_ADDR_START  0x20000000
+#define EXYNOS_DEV_ADDR_SIZE   0x40000000
+#define EXYNOS_DEV_ADDR_ORDER  0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+                               struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+       struct device *dev = drm_dev->dev;
+
+       return dev->archdata.mapping ? true : false;
+#else
+       return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+       return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+                                               struct device *subdrv_dev)
+{
+       return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+                                               struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+       return false;
+}
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644 (file)
index 0000000..49eebe9
--- /dev/null
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <plat/map-base.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * IPP stands for Image Post Processing and supports image scaler/rotator
+ * and input/output DMA operations, using FIMC, GSC, Rotator, and so on.
+ * IPP is an integrated device driver for hardware blocks that share
+ * these attributes.
+ */
+
+/*
+ * TODO
+ * 1. expand command control id.
+ * 2. integrate property and config.
+ * 3. remove the send_event id check routine.
+ * 4. compare send_event id if needed.
+ * 5. free subdrv_remove notifier callback list if needed.
+ * 6. check subdrv_open for multi-open.
+ * 7. implement power and sysmmu control in power_on.
+ */
+
+#define get_ipp_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define ipp_is_m2m_cmd(c)      (c == IPP_CMD_M2M)
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+       struct drm_pending_event        base;
+       struct drm_exynos_ipp_event     event;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ * @filp: a pointer to drm_file.
+ */
+struct drm_exynos_ipp_mem_node {
+       struct list_head        list;
+       enum drm_exynos_ops_id  ops_id;
+       u32     prop_id;
+       u32     buf_id;
+       struct drm_exynos_ipp_buf_info  buf_info;
+       struct drm_file         *filp;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @ipp_lock: lock for synchronization of access to ipp_idr.
+ * @prop_lock: lock for synchronization of access to prop_idr.
+ * @ipp_idr: ipp driver idr.
+ * @prop_idr: property idr.
+ * @event_workq: event work queue.
+ * @cmd_workq: command work queue.
+ */
+struct ipp_context {
+       struct exynos_drm_subdrv        subdrv;
+       struct mutex    ipp_lock;
+       struct mutex    prop_lock;
+       struct idr      ipp_idr;
+       struct idr      prop_idr;
+       struct workqueue_struct *event_workq;
+       struct workqueue_struct *cmd_workq;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ippdrv)
+               return -EINVAL;
+
+       mutex_lock(&exynos_drm_ippdrv_lock);
+       list_del(&ippdrv->drv_list);
+       mutex_unlock(&exynos_drm_ippdrv_lock);
+
+       return 0;
+}
+
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
+               u32 *idp)
+{
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
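+       /*
+        * Two-step idr allocation: preallocate outside the mutex, then
+        * retry on -EAGAIN if another thread consumed the preallocated
+        * node before we took the lock.
+        */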
+again:
+       /* ensure there is space available to allocate a handle */
+       if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
+               DRM_ERROR("failed to get idr.\n");
+               return -ENOMEM;
+       }
+
+       /* do the allocation under our mutex lock */
+       mutex_lock(lock);
+       ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+       mutex_unlock(lock);
+       if (ret == -EAGAIN)
+               goto again;
+
+       return ret;
+}
+
+static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
+{
+       void *obj;
+
+       DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
+
+       mutex_lock(lock);
+
+       /* find object using handle */
+       obj = idr_find(id_idr, id);
+       if (!obj) {
+               DRM_ERROR("failed to find object.\n");
+               mutex_unlock(lock);
+               return ERR_PTR(-ENODEV);
+       }
+
+       mutex_unlock(lock);
+
+       return obj;
+}
+
+static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
+               enum drm_exynos_ipp_cmd cmd)
+{
+       /*
+        * The driver is busy if it is dedicated, or if a non-M2M
+        * (WB/OUTPUT) operation is running, i.e. the device is not
+        * runtime suspended.
+        */
+       if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
+           !pm_runtime_suspended(ippdrv->dev)))
+               return true;
+
+       return false;
+}
+
+static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       u32 ipp_id = property->ipp_id;
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+       if (ipp_id) {
+               /* find ipp driver using idr */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                       ipp_id);
+               if (IS_ERR_OR_NULL(ippdrv)) {
+                       DRM_ERROR("failed to find ipp%d driver.\n", ipp_id);
+                       return ippdrv;
+               }
+
+               /*
+                * WB and OUTPUT operations do not support multi-operation,
+                * so mark the driver dedicated in the set-property ioctl.
+                * When the ipp driver finishes its operations, the
+                * dedicated flag is cleared.
+                */
+               if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                       DRM_ERROR("chosen device is already in use.\n");
+                       return ERR_PTR(-EBUSY);
+               }
+
+               /*
+                * This is necessary to find the correct device among the
+                * ipp drivers: drivers have different abilities, so the
+                * property must be checked.
+                */
+               if (ippdrv->check_property &&
+                   ippdrv->check_property(ippdrv->dev, property)) {
+                       DRM_ERROR("property not supported.\n");
+                       return ERR_PTR(-EINVAL);
+               }
+
+               return ippdrv;
+       } else {
+               /*
+                * This case searches all ipp drivers for a match.
+                * The user application did not set an ipp_id, so the
+                * ipp subsystem looks for a suitable driver in the list.
+                */
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+                       if (ipp_check_dedicated(ippdrv, property->cmd)) {
+                               DRM_DEBUG_KMS("%s:used device.\n", __func__);
+                               continue;
+                       }
+
+                       if (ippdrv->check_property &&
+                           ippdrv->check_property(ippdrv->dev, property)) {
+                               DRM_DEBUG_KMS("%s:property not supported.\n",
+                                       __func__);
+                               continue;
+                       }
+
+                       return ippdrv;
+               }
+
+               DRM_ERROR("no ipp driver supports the requested operations.\n");
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               return ERR_PTR(-ENODEV);
+       }
+
+       /*
+        * This case searches for the ipp driver by prop_id handle.
+        * The ipp subsystem sometimes finds the driver by prop_id,
+        * e.g. for PAUSE state, queue buf, and command control.
+        */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+                       count++, (int)ippdrv);
+
+               if (!list_empty(&ippdrv->cmd_list)) {
+                       list_for_each_entry(c_node, &ippdrv->cmd_list, list)
+                               if (c_node->property.prop_id == prop_id)
+                                       return ippdrv;
+               }
+       }
+
+       return ERR_PTR(-ENODEV);
+}
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_prop_list *prop_list = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!prop_list) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
+
+       if (!prop_list->ipp_id) {
+               list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+                       count++;
+               /*
+                * Report the ippdrv count to the user application:
+                * in a first step the application gets the ippdrv count,
+                * and in a second step it queries each driver's
+                * capability using its ipp_id.
+                */
+               prop_list->count = count;
+       } else {
+               /*
+                * Get the ippdrv capability by ipp_id.
+                * Some devices do not support the wb or output
+                * interfaces, so the user application uses this ioctl
+                * to detect the correct ipp driver.
+                */
+               ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
+                                               prop_list->ipp_id);
+               if (IS_ERR(ippdrv)) {
+                       DRM_ERROR("failed to find ipp%d driver.\n",
+                                       prop_list->ipp_id);
+                       return -EINVAL;
+               }
+
+               memcpy(prop_list, ippdrv->prop_list, sizeof(*prop_list));
+       }
+
+       return 0;
+}
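+
+/*
+ * Illustrative userspace sketch (not part of the driver): the
+ * two-step discovery described above. The ioctl request name is
+ * assumed from the exynos_drm.h uapi added with this patch set.
+ *
+ *     struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
+ *
+ *     ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
+ *     // plist.count now holds the number of registered ipp drivers;
+ *     // query a specific driver's capability by repeating the ioctl
+ *     // with a nonzero .ipp_id.
+ */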
+
+static void ipp_print_property(struct drm_exynos_ipp_property *property,
+               int idx)
+{
+       struct drm_exynos_ipp_config *config = &property->config[idx];
+       struct drm_exynos_pos *pos = &config->pos;
+       struct drm_exynos_sz *sz = &config->sz;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
+               __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
+
+       DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
+               __func__, pos->x, pos->y, pos->w, pos->h,
+               sz->hsize, sz->vsize, config->flip, config->degree);
+}
+
+static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       u32 prop_id = property->prop_id;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Find the command node in the ippdrv command list.
+        * When the command node matching prop_id is found,
+        * update the property information stored in it.
+        */
+       list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+               if ((c_node->property.prop_id == prop_id) &&
+                   (c_node->state == IPP_STATE_STOP)) {
+                       DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
+                               __func__, property->cmd, (int)ippdrv);
+
+                       c_node->property = *property;
+                       return 0;
+               }
+       }
+
+       DRM_ERROR("failed to search property.\n");
+
+       return -EINVAL;
+}
+
+static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
+       if (!cmd_work) {
+               DRM_ERROR("failed to alloc cmd_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
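+       /*
+        * Note: the cast in INIT_WORK below relies on the embedded
+        * work_struct being the first member of drm_exynos_ipp_cmd_work,
+        * so the two pointers alias.
+        */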
+       INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
+
+       return cmd_work;
+}
+
+static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
+{
+       struct drm_exynos_ipp_event_work *event_work;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
+       if (!event_work) {
+               DRM_ERROR("failed to alloc event_work.\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+
+       return event_work;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_property *property = data;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Log the property requested by the user application;
+        * applications may set various properties.
+        */
+       for_each_ipp_ops(i)
+               ipp_print_property(property, i);
+
+       /*
+        * The set-property ioctl normally generates a new prop_id, but
+        * here a prop_id was already assigned by an earlier set-property
+        * call (e.g. PAUSE state). In that case find the current
+        * prop_id and reuse it instead of allocating a new one.
+        */
+       if (property->prop_id) {
+               DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+               return ipp_find_and_set_property(property);
+       }
+
+       /* find ipp driver using ipp id */
+       ippdrv = ipp_find_driver(ctx, property);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EINVAL;
+       }
+
+       /* allocate command node */
+       c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+       if (!c_node) {
+               DRM_ERROR("failed to allocate command node.\n");
+               return -ENOMEM;
+       }
+
+       /* create property id */
+       ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
+               &property->prop_id);
+       if (ret) {
+               DRM_ERROR("failed to create id.\n");
+               goto err_clear;
+       }
+
+       DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
+               __func__, property->prop_id, property->cmd, (int)ippdrv);
+
+       /* store private data and property in the command node */
+       c_node->priv = priv;
+       c_node->property = *property;
+       c_node->state = IPP_STATE_IDLE;
+
+       c_node->start_work = ipp_create_cmd_work();
+       if (IS_ERR(c_node->start_work)) {
+               DRM_ERROR("failed to create start work.\n");
+               ret = PTR_ERR(c_node->start_work);
+               goto err_clear;
+       }
+
+       c_node->stop_work = ipp_create_cmd_work();
+       if (IS_ERR(c_node->stop_work)) {
+               DRM_ERROR("failed to create stop work.\n");
+               ret = PTR_ERR(c_node->stop_work);
+               goto err_free_start;
+       }
+
+       c_node->event_work = ipp_create_event_work();
+       if (IS_ERR(c_node->event_work)) {
+               DRM_ERROR("failed to create event work.\n");
+               ret = PTR_ERR(c_node->event_work);
+               goto err_free_stop;
+       }
+
+       mutex_init(&c_node->cmd_lock);
+       mutex_init(&c_node->mem_lock);
+       mutex_init(&c_node->event_lock);
+
+       init_completion(&c_node->start_complete);
+       init_completion(&c_node->stop_complete);
+
+       for_each_ipp_ops(i)
+               INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+       INIT_LIST_HEAD(&c_node->event_list);
+       list_splice_init(&priv->event_list, &c_node->event_list);
+       list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+       /* make dedicated state without m2m */
+       if (!ipp_is_m2m_cmd(property->cmd))
+               ippdrv->dedicated = true;
+
+       return 0;
+
+err_free_stop:
+       kfree(c_node->stop_work);
+err_free_start:
+       kfree(c_node->start_work);
+err_clear:
+       kfree(c_node);
+       return ret;
+}
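+
+/*
+ * Illustrative userspace sketch (not part of the driver): describing
+ * an M2M operation. Field and ioctl names are assumed from the
+ * exynos_drm.h uapi added with this patch set.
+ *
+ *     struct drm_exynos_ipp_property prop = {
+ *             .cmd = IPP_CMD_M2M,
+ *             .ipp_id = 0,    // 0: let the core pick a capable driver
+ *     };
+ *
+ *     // fill prop.config[EXYNOS_DRM_OPS_SRC/DST] with fmt/pos/sz
+ *     ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
+ *     // on success prop.prop_id identifies the new command node
+ */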
+
+static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* delete list */
+       list_del(&c_node->list);
+
+       /* destroy mutex */
+       mutex_destroy(&c_node->cmd_lock);
+       mutex_destroy(&c_node->mem_lock);
+       mutex_destroy(&c_node->event_lock);
+
+       /* free command node */
+       kfree(c_node->start_work);
+       kfree(c_node->stop_work);
+       kfree(c_node->event_work);
+       kfree(c_node);
+}
+
+static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       for_each_ipp_ops(i) {
+               /* source/destination memory list */
+               head = &c_node->mem_list[i];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
+                               i ? "dst" : "src");
+                       continue;
+               }
+
+               /* find memory node entry */
+               list_for_each_entry(m_node, head, list) {
+                       DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
+                               i ? "dst" : "src", count[i], (int)m_node);
+                       count[i]++;
+               }
+       }
+
+       DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
+               min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
+               max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
+
+       /*
+        * M2M operations need paired src/dst memory addresses, so
+        * check the minimum of the src and dst counts. Other cases
+        * do not use paired memory, so use the maximum count.
+        */
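+       /*
+        * For example, with 2 src and 3 dst buffers queued, an M2M
+        * command can run min(2, 3) = 2 paired transfers, while the
+        * WB/OUTPUT cases report max(2, 3) = 3.
+        */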
+       if (ipp_is_m2m_cmd(property->cmd))
+               ret = min(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+       else
+               ret = max(count[EXYNOS_DRM_OPS_SRC],
+                       count[EXYNOS_DRM_OPS_DST]);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+               *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct list_head *head;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
+
+       /* source/destination memory list */
+       head = &c_node->mem_list[qbuf->ops_id];
+
+       /* find memory node from memory list */
+       list_for_each_entry(m_node, head, list) {
+               DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
+                       __func__, count++, (int)m_node);
+
+               /* compare buffer id */
+               if (m_node->buf_id == qbuf->buf_id)
+                       return m_node;
+       }
+
+       return NULL;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       int ret = 0;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid queue node.\n");
+               return -EFAULT;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* get operations callback */
+       ops = ippdrv->ops[m_node->ops_id];
+       if (!ops) {
+               DRM_ERROR("not support ops.\n");
+               ret = -EFAULT;
+               goto err_unlock;
+       }
+
+       /* set address and enable irq */
+       if (ops->set_addr) {
+               ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
+                       m_node->buf_id, IPP_BUF_ENQUEUE);
+               if (ret) {
+                       DRM_ERROR("failed to set addr.\n");
+                       goto err_unlock;
+               }
+       }
+
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ret;
+}
+
+static struct drm_exynos_ipp_mem_node
+               *ipp_get_mem_node(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_buf_info buf_info;
+       void *addr;
+       int i;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_lock(&c_node->mem_lock);
+
+       m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+       if (!m_node) {
+               DRM_ERROR("failed to allocate queue node.\n");
+               goto err_unlock;
+       }
+
+       /* clear base address for error handling */
+       memset(&buf_info, 0x0, sizeof(buf_info));
+
+       /* operations, buffer id */
+       m_node->ops_id = qbuf->ops_id;
+       m_node->prop_id = qbuf->prop_id;
+       m_node->buf_id = qbuf->buf_id;
+
+       DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+               (int)m_node, qbuf->ops_id);
+       DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+               qbuf->prop_id, m_node->buf_id);
+
+       for_each_ipp_planar(i) {
+               DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+                       i, qbuf->handle[i]);
+
+               /* get dma address by handle */
+               if (qbuf->handle[i]) {
+                       addr = exynos_drm_gem_get_dma_addr(drm_dev,
+                                       qbuf->handle[i], file);
+                       if (IS_ERR(addr)) {
+                               DRM_ERROR("failed to get addr.\n");
+                               goto err_clear;
+                       }
+
+                       buf_info.handles[i] = qbuf->handle[i];
+                       buf_info.base[i] = *(dma_addr_t *) addr;
+                       DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
+                               __func__, i, buf_info.base[i],
+                               (int)buf_info.handles[i]);
+               }
+       }
+
+       m_node->filp = file;
+       m_node->buf_info = buf_info;
+       list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
+
+       mutex_unlock(&c_node->mem_lock);
+       return m_node;
+
+err_clear:
+       kfree(m_node);
+err_unlock:
+       mutex_unlock(&c_node->mem_lock);
+       return ERR_PTR(-EFAULT);
+}
+
+static int ipp_put_mem_node(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node)
+{
+       int i;
+
+       DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
+
+       if (!m_node) {
+               DRM_ERROR("invalid dequeue node.\n");
+               return -EFAULT;
+       }
+
+       if (list_empty(&m_node->list)) {
+               DRM_ERROR("empty memory node.\n");
+               return -ENOMEM;
+       }
+
+       mutex_lock(&c_node->mem_lock);
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
+
+       /* put gem buffer */
+       for_each_ipp_planar(i) {
+               unsigned long handle = m_node->buf_info.handles[i];
+               if (handle)
+                       exynos_drm_gem_put_dma_addr(drm_dev, handle,
+                                                       m_node->filp);
+       }
+
+       /* delete list in queue */
+       list_del(&m_node->list);
+       kfree(m_node);
+
+       mutex_unlock(&c_node->mem_lock);
+
+       return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+       kfree(event);
+}
+
+static int ipp_get_event(struct drm_device *drm_dev,
+               struct drm_file *file,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e;
+       unsigned long flags;
+
+       DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+               qbuf->ops_id, qbuf->buf_id);
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+       if (!e) {
+               DRM_ERROR("failed to allocate event.\n");
+               spin_lock_irqsave(&drm_dev->event_lock, flags);
+               file->event_space += sizeof(e->event);
+               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+               return -ENOMEM;
+       }
+
+       /* make event */
+       e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+       e->event.base.length = sizeof(e->event);
+       e->event.user_data = qbuf->user_data;
+       e->event.prop_id = qbuf->prop_id;
+       e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file;
+       e->base.destroy = ipp_free_event;
+       list_add_tail(&e->base.link, &c_node->event_list);
+
+       return 0;
+}
+
+static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_send_event *e, *te;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
+               return;
+       }
+
+       list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
+               DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+                       __func__, count++, (int)e);
+
+               /*
+                * qbuf == NULL means delete all events: stop operations
+                * flush the whole event list. Otherwise delete only the
+                * event with the matching buf id.
+                */
+               if (!qbuf) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+               }
+
+               /* compare buffer id */
+               if (qbuf && (qbuf->buf_id ==
+                   e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
+                       /* delete list */
+                       list_del(&e->base.link);
+                       kfree(e);
+                       return;
+               }
+       }
+}
+
+void ipp_handle_cmd_work(struct device *dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_work *cmd_work,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       cmd_work->ippdrv = ippdrv;
+       cmd_work->c_node = c_node;
+       queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
+}
+
+static int ipp_queue_buf_with_run(struct device *dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_mem_node *m_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_property *property;
+       struct exynos_drm_ipp_ops *ops;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
+       if (IS_ERR_OR_NULL(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return -EFAULT;
+       }
+
+       ops = ippdrv->ops[qbuf->ops_id];
+       if (!ops) {
+               DRM_ERROR("failed to get ops.\n");
+               return -EFAULT;
+       }
+
+       property = &c_node->property;
+
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return 0;
+       }
+
+       /*
+        * If the destination buffer is set and the clock is enabled,
+        * m2m operations must be started from queue_buf.
+        */
+       if (ipp_is_m2m_cmd(property->cmd)) {
+               struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
+
+               cmd_work->ctrl = IPP_CTRL_PLAY;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+       } else {
+               ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+               if (ret) {
+                       DRM_ERROR("failed to set m node.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void ipp_clean_queue_buf(struct drm_device *drm_dev,
+               struct drm_exynos_ipp_cmd_node *c_node,
+               struct drm_exynos_ipp_queue_buf *qbuf)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
+               /* delete list */
+               list_for_each_entry_safe(m_node, tm_node,
+                       &c_node->mem_list[qbuf->ops_id], list) {
+                       if (m_node->buf_id == qbuf->buf_id &&
+                           m_node->ops_id == qbuf->ops_id)
+                               ipp_put_mem_node(drm_dev, c_node, m_node);
+               }
+       }
+}
+
+int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_queue_buf *qbuf = data;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_mem_node *m_node;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!qbuf) {
+               DRM_ERROR("invalid buf parameter.\n");
+               return -EINVAL;
+       }
+
+       if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+               DRM_ERROR("invalid ops parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
+               __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
+               qbuf->buf_id, qbuf->buf_type);
+
+       /* find command node */
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               qbuf->prop_id);
+       if (IS_ERR(c_node)) {
+               DRM_ERROR("failed to get command node.\n");
+               return -EFAULT;
+       }
+
+       /* buffer control */
+       switch (qbuf->buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* get memory node */
+               m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
+               if (IS_ERR(m_node)) {
+                       DRM_ERROR("failed to get m_node.\n");
+                       return PTR_ERR(m_node);
+               }
+
+               /*
+                * The first step gets an event for the destination
+                * buffer; in the second step the M2M case runs with
+                * the destination buffer if needed.
+                */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
+                       /* get event for destination buffer */
+                       ret = ipp_get_event(drm_dev, file, c_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to get event.\n");
+                               goto err_clean_node;
+                       }
+
+                       /*
+                        * The M2M case runs play control for the
+                        * streaming feature; other cases set the
+                        * address and wait.
+                        */
+                       ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
+                       if (ret) {
+                               DRM_ERROR("failed to run command.\n");
+                               goto err_clean_node;
+                       }
+               }
+               break;
+       case IPP_BUF_DEQUEUE:
+               mutex_lock(&c_node->cmd_lock);
+
+               /* put event for destination buffer */
+               if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
+                       ipp_put_event(c_node, qbuf);
+
+               ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+
+               mutex_unlock(&c_node->cmd_lock);
+               break;
+       default:
+               DRM_ERROR("invalid buffer control.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+
+err_clean_node:
+       DRM_ERROR("clean memory nodes.\n");
+
+       ipp_clean_queue_buf(drm_dev, c_node, qbuf);
+       return ret;
+}
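+
+/*
+ * Illustrative userspace sketch (not part of the driver): enqueue a
+ * destination gem buffer on an existing command node. Names are
+ * assumed from the exynos_drm.h uapi; gem_handle is hypothetical.
+ *
+ *     struct drm_exynos_ipp_queue_buf qbuf = {
+ *             .prop_id = prop.prop_id,  // from the set-property sketch
+ *             .ops_id = EXYNOS_DRM_OPS_DST,
+ *             .buf_id = 0,
+ *             .buf_type = IPP_BUF_ENQUEUE,
+ *     };
+ *
+ *     qbuf.handle[0] = gem_handle;    // first plane
+ *     ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
+ */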
+
+static bool exynos_drm_ipp_check_valid(struct device *dev,
+               enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (ctrl != IPP_CTRL_PLAY) {
+               if (pm_runtime_suspended(dev)) {
+                       DRM_ERROR("pm:runtime_suspended.\n");
+                       goto err_status;
+               }
+       }
+
+       switch (ctrl) {
+       case IPP_CTRL_PLAY:
+               if (state != IPP_STATE_IDLE)
+                       goto err_status;
+               break;
+       case IPP_CTRL_STOP:
+               if (state == IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       case IPP_CTRL_PAUSE:
+               if (state != IPP_STATE_START)
+                       goto err_status;
+               break;
+       case IPP_CTRL_RESUME:
+               if (state != IPP_STATE_STOP)
+                       goto err_status;
+               break;
+       default:
+               DRM_ERROR("invalid state.\n");
+               goto err_status;
+       }
+
+       return true;
+
+err_status:
+       DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
+       return false;
+}
+
+int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct device *dev = priv->dev;
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
+       struct drm_exynos_ipp_cmd_work *cmd_work;
+       struct drm_exynos_ipp_cmd_node *c_node;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!ctx) {
+               DRM_ERROR("invalid context.\n");
+               return -EINVAL;
+       }
+
+       if (!cmd_ctrl) {
+               DRM_ERROR("invalid control parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
+       if (IS_ERR(ippdrv)) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return PTR_ERR(ippdrv);
+       }
+
+       c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
+               cmd_ctrl->prop_id);
+       if (IS_ERR(c_node)) {
+               DRM_ERROR("invalid command node list.\n");
+               return -EINVAL;
+       }
+
+       if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
+           c_node->state)) {
+               DRM_ERROR("invalid state.\n");
+               return -EINVAL;
+       }
+
+       switch (cmd_ctrl->ctrl) {
+       case IPP_CTRL_PLAY:
+               if (pm_runtime_suspended(ippdrv->dev))
+                       pm_runtime_get_sync(ippdrv->dev);
+               c_node->state = IPP_STATE_START;
+
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               break;
+       case IPP_CTRL_STOP:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(300))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               ippdrv->dedicated = false;
+               ipp_clean_cmd_node(c_node);
+
+               if (list_empty(&ippdrv->cmd_list))
+                       pm_runtime_put_sync(ippdrv->dev);
+               break;
+       case IPP_CTRL_PAUSE:
+               cmd_work = c_node->stop_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+
+               if (!wait_for_completion_timeout(&c_node->stop_complete,
+                   msecs_to_jiffies(200))) {
+                       DRM_ERROR("timeout stop:prop_id[%d]\n",
+                               c_node->property.prop_id);
+               }
+
+               c_node->state = IPP_STATE_STOP;
+               break;
+       case IPP_CTRL_RESUME:
+               c_node->state = IPP_STATE_START;
+               cmd_work = c_node->start_work;
+               cmd_work->ctrl = cmd_ctrl->ctrl;
+               ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
+               break;
+       default:
+               DRM_ERROR("unsupported control type.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
+               cmd_ctrl->ctrl, cmd_ctrl->prop_id);
+
+       return 0;
+}
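+
+/*
+ * Illustrative userspace sketch (not part of the driver): pause and
+ * resume an active command. The ioctl request name is assumed from
+ * the exynos_drm.h uapi added with this patch set.
+ *
+ *     struct drm_exynos_ipp_cmd_ctrl ctrl = {
+ *             .prop_id = prop.prop_id,  // from the set-property sketch
+ *             .ctrl = IPP_CTRL_PAUSE,
+ *     };
+ *
+ *     ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
+ *     ctrl.ctrl = IPP_CTRL_RESUME;
+ *     ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
+ */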
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(
+               &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(
+               &exynos_drm_ippnb_list, nb);
+}
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return blocking_notifier_call_chain(
+               &exynos_drm_ippnb_list, val, v);
+}
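+
+/*
+ * Minimal in-kernel sketch (illustrative only): another component
+ * can observe ipp notifications through this chain, e.g.:
+ *
+ *     static int my_ippnb_cb(struct notifier_block *nb,
+ *                     unsigned long val, void *data)
+ *     {
+ *             return NOTIFY_DONE;
+ *     }
+ *
+ *     static struct notifier_block my_ippnb = {
+ *             .notifier_call = my_ippnb_cb,
+ *     };
+ *
+ *     exynos_drm_ippnb_register(&my_ippnb);
+ */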
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_property *property)
+{
+       struct exynos_drm_ipp_ops *ops = NULL;
+       bool swap = false;
+       int ret, i;
+
+       if (!property) {
+               DRM_ERROR("invalid property parameter.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* reset h/w block */
+       if (ippdrv->reset &&
+           ippdrv->reset(ippdrv->dev)) {
+               DRM_ERROR("failed to reset.\n");
+               return -EINVAL;
+       }
+
+       /* set source,destination operations */
+       for_each_ipp_ops(i) {
+               struct drm_exynos_ipp_config *config =
+                       &property->config[i];
+
+               ops = ippdrv->ops[i];
+               if (!ops || !config) {
+                       DRM_ERROR("ops or config not supported.\n");
+                       return -EINVAL;
+               }
+
+               /* set format */
+               if (ops->set_fmt) {
+                       ret = ops->set_fmt(ippdrv->dev, config->fmt);
+                       if (ret) {
+                               DRM_ERROR("format not supported.\n");
+                               return ret;
+                       }
+               }
+
+               /* set transform for rotation, flip */
+               if (ops->set_transf) {
+                       ret = ops->set_transf(ippdrv->dev, config->degree,
+                               config->flip, &swap);
+                       if (ret) {
+                               DRM_ERROR("transform not supported.\n");
+                               return -EINVAL;
+                       }
+               }
+
+               /* set size */
+               if (ops->set_size) {
+                       ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+                               &config->sz);
+                       if (ret) {
+                               DRM_ERROR("size not supported.\n");
+                               return ret;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* store command info in ippdrv */
+       ippdrv->cmd = c_node;
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return -ENOMEM;
+       }
+
+       /* set current property in ippdrv */
+       ret = ipp_set_property(ippdrv, property);
+       if (ret) {
+               DRM_ERROR("failed to set property.\n");
+               ippdrv->cmd = NULL;
+               return ret;
+       }
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+                       if (!m_node) {
+                               DRM_ERROR("failed to get node.\n");
+                               ret = -EFAULT;
+                               return ret;
+                       }
+
+                       DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+                               __func__, (int)m_node);
+
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               list_for_each_entry(m_node, head, list) {
+                       ret = ipp_set_mem_node(ippdrv, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to set m node.\n");
+                               return ret;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
+
+       /* start operations */
+       if (ippdrv->start) {
+               ret = ippdrv->start(ippdrv->dev, property->cmd);
+               if (ret) {
+                       DRM_ERROR("failed to start ops.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+               struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node)
+{
+       struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct list_head *head;
+       int ret = 0, i;
+
+       DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+       /* put event */
+       ipp_put_event(c_node, NULL);
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       if (list_empty(head)) {
+                               DRM_DEBUG_KMS("%s:mem_list is empty.\n",
+                                       __func__);
+                               break;
+                       }
+
+                       list_for_each_entry_safe(m_node, tm_node,
+                               head, list) {
+                               ret = ipp_put_mem_node(drm_dev, c_node,
+                                       m_node);
+                               if (ret) {
+                                       DRM_ERROR("failed to put m_node.\n");
+                                       goto err_clear;
+                               }
+                       }
+               }
+               break;
+       case IPP_CMD_WB:
+               /* destination memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               if (list_empty(head)) {
+                       DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
+                       break;
+               }
+
+               list_for_each_entry_safe(m_node, tm_node, head, list) {
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret) {
+                               DRM_ERROR("failed to put m_node.\n");
+                               goto err_clear;
+                       }
+               }
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               ret = -EINVAL;
+               goto err_clear;
+       }
+
+err_clear:
+       /* stop operations */
+       if (ippdrv->stop)
+               ippdrv->stop(ippdrv->dev, property->cmd);
+
+       return ret;
+}
+
+void ipp_sched_cmd(struct work_struct *work)
+{
+       struct drm_exynos_ipp_cmd_work *cmd_work =
+               (struct drm_exynos_ipp_cmd_work *)work;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       struct drm_exynos_ipp_property *property;
+       int ret;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       ippdrv = cmd_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("invalid ippdrv list.\n");
+               return;
+       }
+
+       c_node = cmd_work->c_node;
+       if (!c_node) {
+               DRM_ERROR("invalid command node list.\n");
+               return;
+       }
+
+       mutex_lock(&c_node->cmd_lock);
+
+       property = &c_node->property;
+
+       switch (cmd_work->ctrl) {
+       case IPP_CTRL_PLAY:
+       case IPP_CTRL_RESUME:
+               ret = ipp_start_property(ippdrv, c_node);
+               if (ret) {
+                       DRM_ERROR("failed to start property:prop_id[%d]\n",
+                               c_node->property.prop_id);
+                       goto err_unlock;
+               }
+
+               /*
+                * The M2M case waits for completion of the transfer:
+                * it performs a single unit operation with multiple
+                * queues, so it must wait until the data transfer
+                * completes.
+                */
+               if (ipp_is_m2m_cmd(property->cmd)) {
+                       if (!wait_for_completion_timeout
+                           (&c_node->start_complete, msecs_to_jiffies(200))) {
+                               DRM_ERROR("timeout event:prop_id[%d]\n",
+                                       c_node->property.prop_id);
+                               goto err_unlock;
+                       }
+               }
+               break;
+       case IPP_CTRL_STOP:
+       case IPP_CTRL_PAUSE:
+               ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
+                       c_node);
+               if (ret) {
+                       DRM_ERROR("failed to stop property.\n");
+                       goto err_unlock;
+               }
+
+               complete(&c_node->stop_complete);
+               break;
+       default:
+               DRM_ERROR("unknown control type\n");
+               break;
+       }
+
+       DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
+
+err_unlock:
+       mutex_unlock(&c_node->cmd_lock);
+}
+
+static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
+               struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
+{
+       struct drm_device *drm_dev = ippdrv->drm_dev;
+       struct drm_exynos_ipp_property *property = &c_node->property;
+       struct drm_exynos_ipp_mem_node *m_node;
+       struct drm_exynos_ipp_queue_buf qbuf;
+       struct drm_exynos_ipp_send_event *e;
+       struct list_head *head;
+       struct timeval now;
+       unsigned long flags;
+       u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+       int ret, i;
+
+       for_each_ipp_ops(i)
+               DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                       i ? "dst" : "src", buf_id[i]);
+
+       if (!drm_dev) {
+               DRM_ERROR("failed to get drm_dev.\n");
+               return -EINVAL;
+       }
+
+       if (list_empty(&c_node->event_list)) {
+               DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
+               return 0;
+       }
+
+       if (!ipp_check_mem_list(c_node)) {
+               DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
+               return 0;
+       }
+
+       /* check command */
+       switch (property->cmd) {
+       case IPP_CMD_M2M:
+               for_each_ipp_ops(i) {
+                       /* source/destination memory list */
+                       head = &c_node->mem_list[i];
+
+                       m_node = list_first_entry(head,
+                               struct drm_exynos_ipp_mem_node, list);
+                       if (!m_node) {
+                               DRM_ERROR("empty memory node.\n");
+                               return -ENOMEM;
+                       }
+
+                       tbuf_id[i] = m_node->buf_id;
+                       DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
+                               i ? "dst" : "src", tbuf_id[i]);
+
+                       ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+                       if (ret)
+                               DRM_ERROR("failed to put m_node.\n");
+               }
+               break;
+       case IPP_CMD_WB:
+               /* clear buf for finding */
+               memset(&qbuf, 0x0, sizeof(qbuf));
+               qbuf.ops_id = EXYNOS_DRM_OPS_DST;
+               qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
+
+               /* get memory node entry */
+               m_node = ipp_find_mem_node(c_node, &qbuf);
+               if (!m_node) {
+                       DRM_ERROR("empty memory node.\n");
+                       return -ENOMEM;
+               }
+
+               tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       case IPP_CMD_OUTPUT:
+               /* source memory list */
+               head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+               m_node = list_first_entry(head,
+                       struct drm_exynos_ipp_mem_node, list);
+               if (!m_node) {
+                       DRM_ERROR("empty memory node.\n");
+                       return -ENOMEM;
+               }
+
+               tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+               ret = ipp_put_mem_node(drm_dev, c_node, m_node);
+               if (ret)
+                       DRM_ERROR("failed to put m_node.\n");
+               break;
+       default:
+               DRM_ERROR("invalid operations.\n");
+               return -EINVAL;
+       }
+
+       if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
+               DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
+                       tbuf_id[1], buf_id[1], property->prop_id);
+
+       /*
+        * The command node holds an event list for destination buffers.
+        * When a destination buffer is enqueued on the mem list, an
+        * event is created and linked at the event list tail, so the
+        * first event corresponds to the first enqueued buffer.
+        */
+       e = list_first_entry(&c_node->event_list,
+               struct drm_exynos_ipp_send_event, base.link);
+
+       if (!e) {
+               DRM_ERROR("empty event.\n");
+               return -EINVAL;
+       }
+
+       do_gettimeofday(&now);
+       DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n",
+               __func__, now.tv_sec, now.tv_usec);
+       e->event.tv_sec = now.tv_sec;
+       e->event.tv_usec = now.tv_usec;
+       e->event.prop_id = property->prop_id;
+
+       /* set buffer id about source destination */
+       for_each_ipp_ops(i)
+               e->event.buf_id[i] = tbuf_id[i];
+
+       spin_lock_irqsave(&drm_dev->event_lock, flags);
+       list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+       wake_up_interruptible(&e->base.file_priv->event_wait);
+       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+       DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
+               property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
+
+       return 0;
+}
+
+void ipp_sched_event(struct work_struct *work)
+{
+       struct drm_exynos_ipp_event_work *event_work =
+               (struct drm_exynos_ipp_event_work *)work;
+       struct exynos_drm_ippdrv *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       int ret;
+
+       if (!event_work) {
+               DRM_ERROR("failed to get event_work.\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
+               event_work->buf_id[EXYNOS_DRM_OPS_DST]);
+
+       ippdrv = event_work->ippdrv;
+       if (!ippdrv) {
+               DRM_ERROR("failed to get ipp driver.\n");
+               return;
+       }
+
+       c_node = ippdrv->cmd;
+       if (!c_node) {
+               DRM_ERROR("failed to get command node.\n");
+               return;
+       }
+
+       /*
+        * IPP synchronizes the command thread and the event thread.
+        * If IPP is closed immediately from user land, synchronize
+        * with the command thread by signalling completion before
+        * bailing out of the operation.
+        */
+       if (c_node->state != IPP_STATE_START) {
+               DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
+                       __func__, c_node->state, c_node->property.prop_id);
+               goto err_completion;
+       }
+
+       mutex_lock(&c_node->event_lock);
+
+       ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
+       if (ret) {
+               DRM_ERROR("failed to send event.\n");
+               goto err_completion;
+       }
+
+err_completion:
+       if (ipp_is_m2m_cmd(c_node->property.cmd))
+               complete(&c_node->start_complete);
+
+       mutex_unlock(&c_node->event_lock);
+}
+
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret, count = 0;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* get ipp driver entry */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               ippdrv->drm_dev = drm_dev;
+
+               ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
+                       &ippdrv->ipp_id);
+               if (ret) {
+                       DRM_ERROR("failed to create id.\n");
+                       goto err_idr;
+               }
+
+               DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
+                       count++, (int)ippdrv, ippdrv->ipp_id);
+
+               if (ippdrv->ipp_id == 0) {
+                       DRM_ERROR("failed to get ipp_id[%d]\n",
+                               ippdrv->ipp_id);
+                       ret = -EINVAL;
+                       goto err_idr;
+               }
+
+               /* store parent device for node */
+               ippdrv->parent_dev = dev;
+
+               /* store event work queue and handler */
+               ippdrv->event_workq = ctx->event_workq;
+               ippdrv->sched_event = ipp_sched_event;
+               INIT_LIST_HEAD(&ippdrv->cmd_list);
+
+               if (is_drm_iommu_supported(drm_dev)) {
+                       ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
+                       if (ret) {
+                               DRM_ERROR("failed to activate iommu\n");
+                               goto err_iommu;
+                       }
+               }
+       }
+
+       return 0;
+
+err_iommu:
+       /* get ipp driver entry */
+       list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+err_idr:
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+       return ret;
+}
+
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+       struct exynos_drm_ippdrv *ippdrv;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* get ipp driver entry */
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               if (is_drm_iommu_supported(drm_dev))
+                       drm_iommu_detach_device(drm_dev, ippdrv->dev);
+
+               ippdrv->drm_dev = NULL;
+               exynos_drm_ippdrv_unregister(ippdrv);
+       }
+}
+
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               DRM_ERROR("failed to allocate priv.\n");
+               return -ENOMEM;
+       }
+       priv->dev = dev;
+       file_priv->ipp_priv = priv;
+
+       INIT_LIST_HEAD(&priv->event_list);
+
+       DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
+
+       return 0;
+}
+
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+               struct drm_file *file)
+{
+       struct drm_exynos_file_private *file_priv = file->driver_priv;
+       struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+       struct exynos_drm_ippdrv *ippdrv = NULL;
+       struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+       int count = 0;
+
+       DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
+
+       if (list_empty(&exynos_drm_ippdrv_list)) {
+               DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
+               goto err_clear;
+       }
+
+       list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
+               if (list_empty(&ippdrv->cmd_list))
+                       continue;
+
+               list_for_each_entry_safe(c_node, tc_node,
+                       &ippdrv->cmd_list, list) {
+                       DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
+                               __func__, count++, (int)ippdrv);
+
+                       if (c_node->priv == priv) {
+                               /*
+                                * Userspace terminated abnormally (e.g. the
+                                * process was killed and the file closed), so
+                                * no stop command was issued via cmd ctrl.
+                                * Perform the stop operation here instead.
+                                */
+                               if (c_node->state == IPP_STATE_START) {
+                                       ipp_stop_property(drm_dev, ippdrv,
+                                               c_node);
+                                       c_node->state = IPP_STATE_STOP;
+                               }
+
+                               ippdrv->dedicated = false;
+                               ipp_clean_cmd_node(c_node);
+                               if (list_empty(&ippdrv->cmd_list))
+                                       pm_runtime_put_sync(ippdrv->dev);
+                       }
+               }
+       }
+
+err_clear:
+       kfree(priv);
+       return;
+}
+
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ipp_context *ctx;
+       struct exynos_drm_subdrv *subdrv;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       mutex_init(&ctx->ipp_lock);
+       mutex_init(&ctx->prop_lock);
+
+       idr_init(&ctx->ipp_idr);
+       idr_init(&ctx->prop_idr);
+
+       /*
+        * Create a single-threaded workqueue for ipp events.
+        * IPP drivers queue event_work items here, and the event
+        * thread delivers the resulting events to the user process.
+        */
+       ctx->event_workq = create_singlethread_workqueue("ipp_event");
+       if (!ctx->event_workq) {
+               dev_err(dev, "failed to create event workqueue\n");
+               ret = -EINVAL;
+               goto err_clear;
+       }
+
+       /*
+        * Create a single-threaded workqueue for ipp commands.
+        * A user process creates a command node with the set-property
+        * ioctl and queues start_work here; the command thread then
+        * starts the property operation.
+        */
+       ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
+       if (!ctx->cmd_workq) {
+               dev_err(dev, "failed to create cmd workqueue\n");
+               ret = -EINVAL;
+               goto err_event_workq;
+       }
+
+       /* set sub driver information */
+       subdrv = &ctx->subdrv;
+       subdrv->dev = dev;
+       subdrv->probe = ipp_subdrv_probe;
+       subdrv->remove = ipp_subdrv_remove;
+       subdrv->open = ipp_subdrv_open;
+       subdrv->close = ipp_subdrv_close;
+
+       platform_set_drvdata(pdev, ctx);
+
+       ret = exynos_drm_subdrv_register(subdrv);
+       if (ret < 0) {
+               DRM_ERROR("failed to register drm ipp device.\n");
+               goto err_cmd_workq;
+       }
+
+       dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+       return 0;
+
+err_cmd_workq:
+       destroy_workqueue(ctx->cmd_workq);
+err_event_workq:
+       destroy_workqueue(ctx->event_workq);
+err_clear:
+       kfree(ctx);
+       return ret;
+}
+
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+       struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       /* unregister sub driver */
+       exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+       /* remove and destroy ipp idr */
+       idr_remove_all(&ctx->ipp_idr);
+       idr_remove_all(&ctx->prop_idr);
+       idr_destroy(&ctx->ipp_idr);
+       idr_destroy(&ctx->prop_idr);
+
+       mutex_destroy(&ctx->ipp_lock);
+       mutex_destroy(&ctx->prop_lock);
+
+       /* destroy command, event work queue */
+       destroy_workqueue(ctx->cmd_workq);
+       destroy_workqueue(ctx->event_workq);
+
+       kfree(ctx);
+
+       return 0;
+}
+
+static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ipp_suspend(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_resume(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!pm_runtime_suspended(dev))
+               return ipp_power_ctrl(ctx, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int ipp_runtime_suspend(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(ctx, false);
+}
+
+static int ipp_runtime_resume(struct device *dev)
+{
+       struct ipp_context *ctx = get_ipp_context(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return ipp_power_ctrl(ctx, true);
+}
+#endif
+
+static const struct dev_pm_ops ipp_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
+       SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
+};
+
+struct platform_driver ipp_driver = {
+       .probe          = ipp_probe,
+       .remove         = __devexit_p(ipp_remove),
+       .driver         = {
+               .name   = "exynos-drm-ipp",
+               .owner  = THIS_MODULE,
+               .pm     = &ipp_pm_ops,
+       },
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644 (file)
index 0000000..28ffac9
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *     Jinyoung Jeon <jy0.jeon@samsung.com>
+ *     Sangmin Lee <lsmin.lee@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define for_each_ipp_ops(pos)  \
+       for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
+#define for_each_ipp_planar(pos)       \
+       for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
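+
+/*
+ * Illustrative usage sketch: both helpers expand to a plain for loop over
+ * an int index, e.g. clearing every planar base address of a buffer:
+ *
+ *     int i;
+ *
+ *     for_each_ipp_planar(i)
+ *             buf_info->base[i] = 0;
+ */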
+
+#define IPP_GET_LCD_WIDTH      _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT     _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK      _IOW('F', 304, u32)
+
+/* definition of state */
+enum drm_exynos_ipp_state {
+       IPP_STATE_IDLE,
+       IPP_STATE_START,
+       IPP_STATE_STOP,
+};
+
+/*
+ * A structure of command work information.
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @c_node: command node information.
+ * @ctrl: command control.
+ */
+struct drm_exynos_ipp_cmd_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv        *ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node;
+       enum drm_exynos_ipp_ctrl        ctrl;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @priv: IPP private information.
+ * @list: list head to command queue information.
+ * @event_list: list head of event.
+ * @mem_list: list head to source/destination memory queue information.
+ * @cmd_lock: lock for synchronization of access to ioctl.
+ * @mem_lock: lock for synchronization of access to memory nodes.
+ * @event_lock: lock for synchronization of access to scheduled event.
+ * @start_complete: completion of start of command.
+ * @stop_complete: completion of stop of command.
+ * @property: property information.
+ * @start_work: start command work structure.
+ * @stop_work: stop command work structure.
+ * @event_work: event work structure.
+ * @state: state of command node.
+ */
+struct drm_exynos_ipp_cmd_node {
+       struct exynos_drm_ipp_private *priv;
+       struct list_head        list;
+       struct list_head        event_list;
+       struct list_head        mem_list[EXYNOS_DRM_OPS_MAX];
+       struct mutex    cmd_lock;
+       struct mutex    mem_lock;
+       struct mutex    event_lock;
+       struct completion       start_complete;
+       struct completion       stop_complete;
+       struct drm_exynos_ipp_property  property;
+       struct drm_exynos_ipp_cmd_work *start_work;
+       struct drm_exynos_ipp_cmd_work *stop_work;
+       struct drm_exynos_ipp_event_work *event_work;
+       enum drm_exynos_ipp_state       state;
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @handles: Y, Cb, Cr gem handle for each plane.
+ * @base: Y, Cb, Cr base address for each plane.
+ */
+struct drm_exynos_ipp_buf_info {
+       unsigned long   handles[EXYNOS_DRM_PLANAR_MAX];
+       dma_addr_t      base[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of writeback (wb) setting information.
+ *
+ * @enable: enable flag for writeback.
+ * @refresh: refresh rate in Hz.
+ */
+struct drm_exynos_ipp_set_wb {
+       __u32   enable;
+       __u32   refresh;
+};
+
+/*
+ * A structure of event work information.
+ *
+ * @work: work structure.
+ * @ippdrv: current work ippdrv.
+ * @buf_id: id of src, dst buffer.
+ */
+struct drm_exynos_ipp_event_work {
+       struct work_struct      work;
+       struct exynos_drm_ippdrv *ippdrv;
+       u32     buf_id[EXYNOS_DRM_OPS_MAX];
+};
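+
+/*
+ * Illustrative note: work_struct is the first member, so a driver can queue
+ * an event from its IRQ handler by casting, as the rotator does:
+ *
+ *     event_work->ippdrv = ippdrv;
+ *     event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+ *     queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
+ */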
+
+/*
+ * A structure of source/destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform (rotation, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+       int (*set_fmt)(struct device *dev, u32 fmt);
+       int (*set_transf)(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap);
+       int (*set_size)(struct device *dev, int swap,
+               struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+       int (*set_addr)(struct device *dev,
+                struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+               enum drm_exynos_ipp_buf_type buf_type);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @drv_list: list head for registered sub driver information.
+ * @parent_dev: parent device information.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @ops: source/destination operations.
+ * @event_workq: event work queue.
+ * @cmd: current command information.
+ * @cmd_list: list head for command information.
+ * @prop_list: property information of current ipp driver.
+ * @check_property: check property about format, size, buffer.
+ * @reset: reset ipp block.
+ * @start: start the device operation for each ipp command.
+ * @stop: stop the device operation for each ipp command.
+ * @sched_event: work schedule handler.
+ */
+struct exynos_drm_ippdrv {
+       struct list_head        drv_list;
+       struct device   *parent_dev;
+       struct device   *dev;
+       struct drm_device       *drm_dev;
+       u32     ipp_id;
+       bool    dedicated;
+       struct exynos_drm_ipp_ops       *ops[EXYNOS_DRM_OPS_MAX];
+       struct workqueue_struct *event_workq;
+       struct drm_exynos_ipp_cmd_node *cmd;
+       struct list_head        cmd_list;
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       int (*check_property)(struct device *dev,
+               struct drm_exynos_ipp_property *property);
+       int (*reset)(struct device *dev);
+       int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+       void (*sched_event)(struct work_struct *work);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
+                                        struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+extern void ipp_sched_cmd(struct work_struct *work);
+extern void ipp_sched_event(struct work_struct *work);
+
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file_priv)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
+                                               void *data,
+                                               struct drm_file *file)
+{
+       return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+       return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+       return -ENOTTY;
+}
+#endif
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
index 862ca1e..83efc66 100644 (file)
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
  * CRTC ----------------
  *      ^ start        ^ end
  *
- * There are six cases from a to b.
+ * There are six cases from a to f.
  *
  *             <----- SCREEN ----->
  *             0                 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
                }
 
                overlay->dma_addr[i] = buffer->dma_addr;
-               overlay->vaddr[i] = buffer->kvaddr;
 
-               DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
-                               i, (unsigned long)overlay->vaddr[i],
-                               (unsigned long)overlay->dma_addr[i]);
+               DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
+                               i, (unsigned long)overlay->dma_addr[i]);
        }
 
        actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_x < 0) {
                if (actual_w)
                        src_x -= crtc_x;
-               else
-                       src_x += crtc_w;
                crtc_x = 0;
        }
 
        if (crtc_y < 0) {
                if (actual_h)
                        src_y -= crtc_y;
-               else
-                       src_y += crtc_h;
                crtc_y = 0;
        }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644 (file)
index 0000000..1c23660
--- /dev/null
@@ -0,0 +1,855 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ *     YoungJun Cho <yj44.cho@samsung.com>
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include "regs-rotator.h"
+#include "exynos_drm.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * Rotator supports image crop/rotation and input/output DMA operations.
+ * Input DMA reads image data from memory.
+ * Output DMA writes image data to memory.
+ *
+ * M2M operation: supports crop/scale/rotation/csc and so on.
+ * Memory ----> Rotator H/W ----> Memory.
+ */
+
+/*
+ * TODO
+ * 1. check suspend/resume api if needed.
+ * 2. need to check the use case for platform_device_id.
+ * 3. check src/dst size width, height.
+ * 4. need to add the supported list to prop_list.
+ */
+
+#define get_rot_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv)    container_of(ippdrv,\
+                                       struct rot_context, ippdrv)
+#define rot_read(offset)               readl(rot->regs + (offset))
+#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
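+
+/*
+ * Note: rot_read()/rot_write() expect a local variable named 'rot'
+ * (struct rot_context *) to be in scope at the call site, e.g.:
+ *
+ *     u32 val = rot_read(ROT_STATUS);
+ *
+ *     rot_write(val | ROT_CONTROL_START, ROT_CONTROL);
+ */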
+
+enum rot_irq_status {
+       ROT_IRQ_STATUS_COMPLETE = 8,
+       ROT_IRQ_STATUS_ILLEGAL  = 9,
+};
+
+/*
+ * A structure of limitation.
+ *
+ * @min_w: minimum width.
+ * @min_h: minimum height.
+ * @max_w: maximum width.
+ * @max_h: maximum height.
+ * @align: align size.
+ */
+struct rot_limit {
+       u32     min_w;
+       u32     min_h;
+       u32     max_w;
+       u32     max_h;
+       u32     align;
+};
+
+/*
+ * A structure of limitation table.
+ *
+ * @ycbcr420_2p: case of YUV.
+ * @rgb888: case of RGB.
+ */
+struct rot_limit_table {
+       struct rot_limit        ycbcr420_2p;
+       struct rot_limit        rgb888;
+};
+
+/*
+ * A structure of rotator context.
+ *
+ * @ippdrv: the embedded ipp driver, prepared during initialization.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @clock: rotator gate clock.
+ * @limit_tbl: limitation of rotator.
+ * @irq: irq number.
+ * @cur_buf_id: current operation buffer id.
+ * @suspended: suspended state.
+ */
+struct rot_context {
+       struct exynos_drm_ippdrv        ippdrv;
+       struct resource *regs_res;
+       void __iomem    *regs;
+       struct clk      *clock;
+       struct rot_limit_table  *limit_tbl;
+       int     irq;
+       int     cur_buf_id[EXYNOS_DRM_OPS_MAX];
+       bool    suspended;
+};
+
+static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
+{
+       u32 val = rot_read(ROT_CONFIG);
+
+       if (enable)
+               val |= ROT_CONFIG_IRQ;
+       else
+               val &= ~ROT_CONFIG_IRQ;
+
+       rot_write(val, ROT_CONFIG);
+}
+
+static u32 rotator_reg_get_fmt(struct rot_context *rot)
+{
+       u32 val = rot_read(ROT_CONTROL);
+
+       val &= ROT_CONTROL_FMT_MASK;
+
+       return val;
+}
+
+static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
+{
+       u32 val = rot_read(ROT_STATUS);
+
+       val = ROT_STATUS_IRQ(val);
+
+       if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
+               return ROT_IRQ_STATUS_COMPLETE;
+
+       return ROT_IRQ_STATUS_ILLEGAL;
+}
+
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
+{
+       struct rot_context *rot = arg;
+       struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+       struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
+       struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
+       enum rot_irq_status irq_status;
+       u32 val;
+
+       /* Get execution result */
+       irq_status = rotator_reg_get_irq_status(rot);
+
+       /* clear status */
+       val = rot_read(ROT_STATUS);
+       val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
+       rot_write(val, ROT_STATUS);
+
+       if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
+               event_work->ippdrv = ippdrv;
+               event_work->buf_id[EXYNOS_DRM_OPS_DST] =
+                       rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
+               queue_work(ippdrv->event_workq,
+                       (struct work_struct *)event_work);
+       } else {
+               DRM_ERROR("the SFR is set illegally\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+               u32 *vsize)
+{
+       struct rot_limit_table *limit_tbl = rot->limit_tbl;
+       struct rot_limit *limit;
+       u32 mask, val;
+
+       /* Get size limit */
+       if (fmt == ROT_CONTROL_FMT_RGB888)
+               limit = &limit_tbl->rgb888;
+       else
+               limit = &limit_tbl->ycbcr420_2p;
+
+       /* Get mask for rounding to nearest aligned val */
+       mask = ~((1 << limit->align) - 1);
+
+       /* Set aligned width */
+       val = ROT_ALIGN(*hsize, limit->align, mask);
+       if (val < limit->min_w)
+               *hsize = ROT_MIN(limit->min_w, mask);
+       else if (val > limit->max_w)
+               *hsize = ROT_MAX(limit->max_w, mask);
+       else
+               *hsize = val;
+
+       /* Set aligned height */
+       val = ROT_ALIGN(*vsize, limit->align, mask);
+       if (val < limit->min_h)
+               *vsize = ROT_MIN(limit->min_h, mask);
+       else if (val > limit->max_h)
+               *vsize = ROT_MAX(limit->max_h, mask);
+       else
+               *vsize = val;
+}
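+
+/*
+ * Worked example (illustrative, assuming ROT_ALIGN() rounds with the given
+ * mask): for RGB888 the limit table uses align = 2, so
+ * mask = ~((1 << 2) - 1) keeps sizes on 4-pixel boundaries; a result below
+ * min_w/min_h or above max_w/max_h is clamped to the aligned minimum or
+ * maximum instead.
+ */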
+
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       val = rot_read(ROT_CONTROL);
+       val &= ~ROT_CONTROL_FMT_MASK;
+
+       switch (fmt) {
+       case DRM_FORMAT_NV12:
+               val |= ROT_CONTROL_FMT_YCBCR420_2P;
+               break;
+       case DRM_FORMAT_XRGB8888:
+               val |= ROT_CONTROL_FMT_RGB888;
+               break;
+       default:
+               DRM_ERROR("invalid image format\n");
+               return -EINVAL;
+       }
+
+       rot_write(val, ROT_CONTROL);
+
+       return 0;
+}
+
+static inline bool rotator_check_reg_fmt(u32 fmt)
+{
+       if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
+           (fmt == ROT_CONTROL_FMT_RGB888))
+               return true;
+
+       return false;
+}
+
+static int rotator_src_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 fmt, hsize, vsize;
+       u32 val;
+
+       /* Get format */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_SRC_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_SRC_CROP_POS);
+       val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
+       rot_write(val, ROT_SRC_CROP_SIZE);
+
+       return 0;
+}
+
+static int rotator_src_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
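+                       /*
+                        * NV12 carries a full-size Y plane followed by an
+                        * interleaved CbCr plane. If userspace supplied only
+                        * the Y base address, derive the CbCr base as
+                        * Y + (buffer width * buffer height).
+                        */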
+
+                       val = rot_read(ROT_SRC_BUF_SIZE);
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* Set cb planar */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_SRC_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+static int rotator_dst_set_transf(struct device *dev,
+               enum drm_exynos_degree degree,
+               enum drm_exynos_flip flip, bool *swap)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       /* Set transform configuration */
+       val = rot_read(ROT_CONTROL);
+       val &= ~ROT_CONTROL_FLIP_MASK;
+
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_VERTICAL:
+               val |= ROT_CONTROL_FLIP_VERTICAL;
+               break;
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               val |= ROT_CONTROL_FLIP_HORIZONTAL;
+               break;
+       default:
+               /* Flip None */
+               break;
+       }
+
+       val &= ~ROT_CONTROL_ROT_MASK;
+
+       switch (degree) {
+       case EXYNOS_DRM_DEGREE_90:
+               val |= ROT_CONTROL_ROT_90;
+               break;
+       case EXYNOS_DRM_DEGREE_180:
+               val |= ROT_CONTROL_ROT_180;
+               break;
+       case EXYNOS_DRM_DEGREE_270:
+               val |= ROT_CONTROL_ROT_270;
+               break;
+       default:
+               /* Rotation 0 Degree */
+               break;
+       }
+
+       rot_write(val, ROT_CONTROL);
+
+       /* Check degree for setting buffer size swap */
+       if ((degree == EXYNOS_DRM_DEGREE_90) ||
+           (degree == EXYNOS_DRM_DEGREE_270))
+               *swap = true;
+       else
+               *swap = false;
+
+       return 0;
+}
+
+static int rotator_dst_set_size(struct device *dev, int swap,
+               struct drm_exynos_pos *pos,
+               struct drm_exynos_sz *sz)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val, fmt, hsize, vsize;
+
+       /* Get format */
+       fmt = rotator_reg_get_fmt(rot);
+       if (!rotator_check_reg_fmt(fmt)) {
+               DRM_ERROR("%s:invalid format.\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Align buffer size */
+       hsize = sz->hsize;
+       vsize = sz->vsize;
+       rotator_align_size(rot, fmt, &hsize, &vsize);
+
+       /* Set buffer size configuration */
+       val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
+       rot_write(val, ROT_DST_BUF_SIZE);
+
+       /* Set crop image position configuration */
+       val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
+       rot_write(val, ROT_DST_CROP_POS);
+
+       return 0;
+}
+
+static int rotator_dst_set_addr(struct device *dev,
+               struct drm_exynos_ipp_buf_info *buf_info,
+               u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+       u32 val, fmt, hsize, vsize;
+       int i;
+
+       /* Set current buf_id */
+       rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
+
+       switch (buf_type) {
+       case IPP_BUF_ENQUEUE:
+               /* Set address configuration */
+               for_each_ipp_planar(i)
+                       addr[i] = buf_info->base[i];
+
+               /* Get format */
+               fmt = rotator_reg_get_fmt(rot);
+               if (!rotator_check_reg_fmt(fmt)) {
+                       DRM_ERROR("%s:invalid format.\n", __func__);
+                       return -EINVAL;
+               }
+
+               /* Re-set cb planar for NV12 format */
+               if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+                   !addr[EXYNOS_DRM_PLANAR_CB]) {
+                       /* Get buf size */
+                       val = rot_read(ROT_DST_BUF_SIZE);
+
+                       hsize = ROT_GET_BUF_SIZE_W(val);
+                       vsize = ROT_GET_BUF_SIZE_H(val);
+
+                       /* Set cb planar */
+                       addr[EXYNOS_DRM_PLANAR_CB] =
+                               addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+               }
+
+               for_each_ipp_planar(i)
+                       rot_write(addr[i], ROT_DST_BUF_ADDR(i));
+               break;
+       case IPP_BUF_DEQUEUE:
+               for_each_ipp_planar(i)
+                       rot_write(0x0, ROT_DST_BUF_ADDR(i));
+               break;
+       default:
+               /* Nothing to do */
+               break;
+       }
+
+       return 0;
+}
+
+static struct exynos_drm_ipp_ops rot_src_ops = {
+       .set_fmt        =       rotator_src_set_fmt,
+       .set_size       =       rotator_src_set_size,
+       .set_addr       =       rotator_src_set_addr,
+};
+
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+       .set_transf     =       rotator_dst_set_transf,
+       .set_size       =       rotator_dst_set_size,
+       .set_addr       =       rotator_dst_set_addr,
+};
+
+static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
+{
+       struct drm_exynos_ipp_prop_list *prop_list;
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
+       if (!prop_list) {
+               DRM_ERROR("failed to alloc property list.\n");
+               return -ENOMEM;
+       }
+
+       prop_list->version = 1;
+       prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
+                               (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
+       prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
+                               (1 << EXYNOS_DRM_DEGREE_90) |
+                               (1 << EXYNOS_DRM_DEGREE_180) |
+                               (1 << EXYNOS_DRM_DEGREE_270);
+       prop_list->csc = 0;
+       prop_list->crop = 0;
+       prop_list->scale = 0;
+
+       ippdrv->prop_list = prop_list;
+
+       return 0;
+}
+
+static inline bool rotator_check_drm_fmt(u32 fmt)
+{
+       switch (fmt) {
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_NV12:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:not support format\n", __func__);
+               return false;
+       }
+}
+
+static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
+{
+       switch (flip) {
+       case EXYNOS_DRM_FLIP_NONE:
+       case EXYNOS_DRM_FLIP_VERTICAL:
+       case EXYNOS_DRM_FLIP_HORIZONTAL:
+               return true;
+       default:
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return false;
+       }
+}
+
+static int rotator_ippdrv_check_property(struct device *dev,
+               struct drm_exynos_ipp_property *property)
+{
+       struct drm_exynos_ipp_config *src_config =
+                                       &property->config[EXYNOS_DRM_OPS_SRC];
+       struct drm_exynos_ipp_config *dst_config =
+                                       &property->config[EXYNOS_DRM_OPS_DST];
+       struct drm_exynos_pos *src_pos = &src_config->pos;
+       struct drm_exynos_pos *dst_pos = &dst_config->pos;
+       struct drm_exynos_sz *src_sz = &src_config->sz;
+       struct drm_exynos_sz *dst_sz = &dst_config->sz;
+       bool swap = false;
+
+       /* Check format configuration */
+       if (src_config->fmt != dst_config->fmt) {
+               DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_fmt(dst_config->fmt)) {
+               DRM_DEBUG_KMS("%s:invalid format\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check transform configuration */
+       if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+               DRM_DEBUG_KMS("%s:not support source-side rotation\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       switch (dst_config->degree) {
+       case EXYNOS_DRM_DEGREE_90:
+       case EXYNOS_DRM_DEGREE_270:
+               swap = true;
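+               /* fall through - 90/270 only additionally set 'swap' */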
+       case EXYNOS_DRM_DEGREE_0:
+       case EXYNOS_DRM_DEGREE_180:
+               /* No problem */
+               break;
+       default:
+               DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
+               return -EINVAL;
+       }
+
+       if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+               DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
+               return -EINVAL;
+       }
+
+       if (!rotator_check_drm_flip(dst_config->flip)) {
+               DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check size configuration */
+       if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+               (src_pos->y + src_pos->h > src_sz->vsize)) {
+               DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
+               return -EINVAL;
+       }
+
+       if (swap) {
+               if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+                       (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+                       DRM_DEBUG_KMS("%s:not support scale feature\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       } else {
+               if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+                       (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+                       DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
+                               __func__);
+                       return -EINVAL;
+               }
+
+               if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+                       DRM_DEBUG_KMS("%s:not support scale feature\n",
+                               __func__);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+       u32 val;
+
+       if (rot->suspended) {
+               DRM_ERROR("suspended state\n");
+               return -EPERM;
+       }
+
+       if (cmd != IPP_CMD_M2M) {
+               DRM_ERROR("not support cmd: %d\n", cmd);
+               return -EINVAL;
+       }
+
+       /* Set interrupt enable */
+       rotator_reg_set_irq(rot, true);
+
+       val = rot_read(ROT_CONTROL);
+       val |= ROT_CONTROL_START;
+
+       rot_write(val, ROT_CONTROL);
+
+       return 0;
+}
+
+static int __devinit rotator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rot_context *rot;
+       struct exynos_drm_ippdrv *ippdrv;
+       int ret;
+
+       rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
+       if (!rot) {
+               dev_err(dev, "failed to allocate rot\n");
+               return -ENOMEM;
+       }
+
+       rot->limit_tbl = (struct rot_limit_table *)
+                               platform_get_device_id(pdev)->driver_data;
+
+       rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!rot->regs_res) {
+               dev_err(dev, "failed to find registers\n");
+               ret = -ENOENT;
+               goto err_get_resource;
+       }
+
+       rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
+       if (!rot->regs) {
+               dev_err(dev, "failed to map register\n");
+               ret = -ENXIO;
+               goto err_get_resource;
+       }
+
+       rot->irq = platform_get_irq(pdev, 0);
+       if (rot->irq < 0) {
+               dev_err(dev, "failed to get irq\n");
+               ret = rot->irq;
+               goto err_get_irq;
+       }
+
+       ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
+                       IRQF_ONESHOT, "drm_rotator", rot);
+       if (ret < 0) {
+               dev_err(dev, "failed to request irq\n");
+               goto err_get_irq;
+       }
+
+       rot->clock = clk_get(dev, "rotator");
+       if (IS_ERR_OR_NULL(rot->clock)) {
+               dev_err(dev, "failed to get clock\n");
+               ret = PTR_ERR(rot->clock);
+               goto err_clk_get;
+       }
+
+       pm_runtime_enable(dev);
+
+       ippdrv = &rot->ippdrv;
+       ippdrv->dev = dev;
+       ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+       ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+       ippdrv->check_property = rotator_ippdrv_check_property;
+       ippdrv->start = rotator_ippdrv_start;
+       ret = rotator_init_prop_list(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to init property list.\n");
+               goto err_ippdrv_register;
+       }
+
+       DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
+
+       platform_set_drvdata(pdev, rot);
+
+       ret = exynos_drm_ippdrv_register(ippdrv);
+       if (ret < 0) {
+               dev_err(dev, "failed to register drm rotator device\n");
+               goto err_ippdrv_register;
+       }
+
+       dev_info(dev, "The exynos rotator is probed successfully\n");
+
+       return 0;
+
+err_ippdrv_register:
+       devm_kfree(dev, ippdrv->prop_list);
+       pm_runtime_disable(dev);
+       clk_put(rot->clock);
+err_clk_get:
+       free_irq(rot->irq, rot);
+err_get_irq:
+       devm_iounmap(dev, rot->regs);
+err_get_resource:
+       devm_kfree(dev, rot);
+       return ret;
+}
+
+static int __devexit rotator_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct rot_context *rot = dev_get_drvdata(dev);
+       struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+
+       devm_kfree(dev, ippdrv->prop_list);
+       exynos_drm_ippdrv_unregister(ippdrv);
+
+       pm_runtime_disable(dev);
+       clk_put(rot->clock);
+
+       free_irq(rot->irq, rot);
+       devm_iounmap(dev, rot->regs);
+
+       devm_kfree(dev, rot);
+
+       return 0;
+}
+
+struct rot_limit_table rot_limit_tbl = {
+       .ycbcr420_2p = {
+               .min_w = 32,
+               .min_h = 32,
+               .max_w = SZ_32K,
+               .max_h = SZ_32K,
+               .align = 3,
+       },
+       .rgb888 = {
+               .min_w = 8,
+               .min_h = 8,
+               .max_w = SZ_8K,
+               .max_h = SZ_8K,
+               .align = 2,
+       },
+};
+
+struct platform_device_id rotator_driver_ids[] = {
+       {
+               .name           = "exynos-rot",
+               .driver_data    = (unsigned long)&rot_limit_tbl,
+       },
+       {},
+};
+
+static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
+{
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (enable) {
+               clk_enable(rot->clock);
+               rot->suspended = false;
+       } else {
+               clk_disable(rot->clock);
+               rot->suspended = true;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rotator_suspend(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_resume(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       if (!pm_runtime_suspended(dev))
+               return rotator_clk_ctrl(rot, true);
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int rotator_runtime_suspend(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return rotator_clk_ctrl(rot, false);
+}
+
+static int rotator_runtime_resume(struct device *dev)
+{
+       struct rot_context *rot = dev_get_drvdata(dev);
+
+       DRM_DEBUG_KMS("%s\n", __func__);
+
+       return rotator_clk_ctrl(rot, true);
+}
+#endif
+
+static const struct dev_pm_ops rotator_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+       SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
+                                                                       NULL)
+};
+
+struct platform_driver rotator_driver = {
+       .probe          = rotator_probe,
+       .remove         = __devexit_p(rotator_remove),
+       .id_table       = rotator_driver_ids,
+       .driver         = {
+               .name   = "exynos-rot",
+               .owner  = THIS_MODULE,
+               .pm     = &rotator_pm_ops,
+       },
+};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644 (file)
index 0000000..a2d7a14
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *     YoungJun Cho <yj44.cho@samsung.com>
+ *     Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef        _EXYNOS_DRM_ROTATOR_H_
+#define        _EXYNOS_DRM_ROTATOR_H_
+
+/* TODO */
+
+#endif
index e4b8a8f..99bfc38 100644 (file)
@@ -39,7 +39,6 @@ struct vidi_win_data {
        unsigned int            fb_height;
        unsigned int            bpp;
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        unsigned int            buf_offsize;
        unsigned int            line_size;      /* bytes */
        bool                    enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
        win_data->fb_width = overlay->fb_width;
        win_data->fb_height = overlay->fb_height;
        win_data->dma_addr = overlay->dma_addr[0] + offset;
-       win_data->vaddr = overlay->vaddr[0] + offset;
        win_data->bpp = overlay->bpp;
        win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
                                (overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
                        win_data->offset_x, win_data->offset_y);
        DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
                        win_data->ovl_width, win_data->ovl_height);
-       DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
-                       (unsigned long)win_data->dma_addr,
-                       (unsigned long)win_data->vaddr);
+       DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
        DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
                        overlay->fb_width, overlay->crtc_width);
 }
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
-
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
-
-       if (is_checked) {
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
-               /*
-                * don't off vblank if vblank_disable_allowed is 1,
-                * because vblank would be off by timer handler.
-                */
-               if (!drm_dev->vblank_disable_allowed)
-                       drm_vblank_off(drm_dev, crtc);
+               drm_vblank_put(drm_dev, crtc);
        }
 
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
index 2c115f8..2c46b6c 100644 (file)
 #define MAX_HEIGHT             1080
 #define get_hdmi_context(dev)  platform_get_drvdata(to_platform_device(dev))
 
+/* AVI header and aspect ratio */
+#define HDMI_AVI_VERSION               0x02
+#define HDMI_AVI_LENGTH                0x0D
+#define AVI_PIC_ASPECT_RATIO_16_9      (2 << 4)
+#define AVI_SAME_AS_PIC_ASPECT_RATIO   8
+
+/* AUI header info */
+#define HDMI_AUI_VERSION       0x01
+#define HDMI_AUI_LENGTH        0x0A
+
+/* HDMI InfoFrame types used to configure the HDMI out packets: VSI, AVI, AUI */
+enum HDMI_PACKET_TYPE {
+       /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
+       /* InfoFrame packet type */
+       HDMI_PACKET_TYPE_INFOFRAME = 0x80,
+       /* Vendor-Specific InfoFrame */
+       HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
+       /* Auxiliary Video information InfoFrame */
+       HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
+       /* Audio information InfoFrame */
+       HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
+};
+
 enum hdmi_type {
        HDMI_TYPE13,
        HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
        struct mutex                    hdmi_mutex;
 
        void __iomem                    *regs;
+       void                            *parent_ctx;
        int                             external_irq;
        int                             internal_irq;
 
@@ -84,7 +108,6 @@ struct hdmi_context {
        int cur_conf;
 
        struct hdmi_resources           res;
-       void                            *parent_ctx;
 
        int                             hpd_gpio;
 
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
        int height;
        int vrefresh;
        bool interlace;
+       int cea_video_id;
        const u8 *hdmiphy_data;
        const struct hdmi_v13_preset_conf *conf;
 };
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
 };
 
 static const struct hdmi_v13_conf hdmi_v13_confs[] = {
-       { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-       { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
-       { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
-       { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
-       { 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
-                                &hdmi_v13_conf_1080p50 },
-       { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
-       { 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
-                                &hdmi_v13_conf_1080p60 },
+       { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_720p60 },
+       { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_720p60 },
+       { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
+                       &hdmi_v13_conf_480p },
+       { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_1080i50 },
+       { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
+                       &hdmi_v13_conf_1080p50 },
+       { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
+                       &hdmi_v13_conf_1080i60 },
+       { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
+                       &hdmi_v13_conf_1080p60 },
 };
 
 /* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
        int height;
        int vrefresh;
        bool interlace;
+       int cea_video_id;
        const u8 *hdmiphy_data;
        const struct hdmi_preset_conf *conf;
 };
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
 };
 
 static const struct hdmi_conf hdmi_confs[] = {
-       { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
-       { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
-       { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
-       { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
-       { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
-       { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
-       { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
-       { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+       { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
+       { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
+       { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+       { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+       { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+       { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
+       { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+       { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
 };
 
+struct hdmi_infoframe {
+       enum HDMI_PACKET_TYPE type;
+       u8 ver;
+       u8 len;
+};
 
 static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
 {
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
        return hdmi_v14_conf_index(mode);
 }
 
+static u8 hdmi_chksum(struct hdmi_context *hdata,
+                       u32 start, u8 len, u32 hdr_sum)
+{
+       int i;
+
+       /*
+        * hdr_sum: header0 + header1 + header2
+        * start: start address of packet byte1
+        * len: packet bytes - 1
+        */
+       for (i = 0; i < len; ++i)
+               hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
+
+       /* return 2's complement of 8 bit hdr_sum */
+       return (u8)(~(hdr_sum & 0xff) + 1);
+}
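+
+/*
+ * Worked example (illustrative): an AVI InfoFrame has header bytes 0x82,
+ * 0x02 and 0x0D, so hdr_sum starts at 0x91. If the payload bytes summed
+ * to 0x24, the total would be 0xB5 and the returned checksum is
+ * (~0xB5 + 1) & 0xff = 0x4B, so header + payload + checksum == 0 mod 256.
+ */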
+
+static void hdmi_reg_infoframe(struct hdmi_context *hdata,
+                       struct hdmi_infoframe *infoframe)
+{
+       u32 hdr_sum;
+       u8 chksum;
+       u32 aspect_ratio;
+       u32 mod;
+       u32 vic;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
+       if (hdata->dvi_mode) {
+               hdmi_reg_writeb(hdata, HDMI_VSI_CON,
+                               HDMI_VSI_CON_DO_NOT_TRANSMIT);
+               hdmi_reg_writeb(hdata, HDMI_AVI_CON,
+                               HDMI_AVI_CON_DO_NOT_TRANSMIT);
+               hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
+               return;
+       }
+
+       switch (infoframe->type) {
+       case HDMI_PACKET_TYPE_AVI:
+               hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
+               hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
+               hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+
+               /* Output format hardcoded to zero (RGB/YCbCr selection) */
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
+                       AVI_ACTIVE_FORMAT_VALID |
+                       AVI_UNDERSCANNED_DISPLAY_VALID);
+
+               aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
+
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
+                               AVI_SAME_AS_PIC_ASPECT_RATIO);
+
+               if (hdata->type == HDMI_TYPE13)
+                       vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
+               else
+                       vic = hdmi_confs[hdata->cur_conf].cea_video_id;
+
+               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
+
+               chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
+                                       infoframe->len, hdr_sum);
+               DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
+               hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
+               break;
+       case HDMI_PACKET_TYPE_AUI:
+               hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
+               hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
+               hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+               chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
+                                       infoframe->len, hdr_sum);
+               DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
+               hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
+               break;
+       default:
+               break;
+       }
+}
+
 static bool hdmi_is_connected(void *ctx)
 {
        struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
                DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
                        (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
                        raw_edid->width_cm, raw_edid->height_cm);
+               kfree(raw_edid);
        } else {
                return -ENODEV;
        }
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
 
 static void hdmi_conf_init(struct hdmi_context *hdata)
 {
+       struct hdmi_infoframe infoframe;
+
        /* disable HPD interrupts */
        hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
                HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
                hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
                hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
        } else {
+               infoframe.type = HDMI_PACKET_TYPE_AVI;
+               infoframe.ver = HDMI_AVI_VERSION;
+               infoframe.len = HDMI_AVI_LENGTH;
+               hdmi_reg_infoframe(hdata, &infoframe);
+
+               infoframe.type = HDMI_PACKET_TYPE_AUI;
+               infoframe.ver = HDMI_AUI_VERSION;
+               infoframe.len = HDMI_AUI_LENGTH;
+               hdmi_reg_infoframe(hdata, &infoframe);
+
                /* enable AVI packet every vsync, fixes purple line problem */
-               hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
-               hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
                hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
        }
 }
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
        mdelay(10);
 }
 
+static void hdmiphy_poweron(struct hdmi_context *hdata)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->type == HDMI_TYPE14)
+               hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
+                       HDMI_PHY_POWER_OFF_EN);
+}
+
+static void hdmiphy_poweroff(struct hdmi_context *hdata)
+{
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (hdata->type == HDMI_TYPE14)
+               hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
+                       HDMI_PHY_POWER_OFF_EN);
+}
+
 static void hdmiphy_conf_apply(struct hdmi_context *hdata)
 {
        const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
                        index = hdmi_v14_conf_index(m);
 
                if (index >= 0) {
+                       struct drm_mode_object base;
+                       struct list_head head;
+
                        DRM_INFO("desired mode doesn't exist so\n");
                        DRM_INFO("use the most suitable mode among modes.\n");
+
+                       /* preserve display mode header while copying. */
+                       head = adjusted_mode->head;
+                       base = adjusted_mode->base;
                        memcpy(adjusted_mode, m, sizeof(*m));
+                       adjusted_mode->head = head;
+                       adjusted_mode->base = base;
                        break;
                }
        }
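
The save/restore around the memcpy() above is needed because struct drm_display_mode embeds bookkeeping that belongs to adjusted_mode (its drm_mode_object and its list_head linkage), which a wholesale copy from the candidate mode would clobber. The same pattern in miniature, with a hypothetical struct:

struct node {
	struct list_head head;	/* linkage owned by the containing list */
	int payload;
};

static void copy_keeping_linkage(struct node *dst, const struct node *src)
{
	struct list_head saved = dst->head;

	memcpy(dst, src, sizeof(*dst));	/* clobbers everything... */
	dst->head = saved;		/* ...so restore the linkage */
}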
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
 
        mutex_unlock(&hdata->hdmi_mutex);
 
-       pm_runtime_get_sync(hdata->dev);
-
        regulator_bulk_enable(res->regul_count, res->regul_bulk);
        clk_enable(res->hdmiphy);
        clk_enable(res->hdmi);
        clk_enable(res->sclk_hdmi);
+
+       hdmiphy_poweron(hdata);
 }
 
 static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
         * its reset state seems to meet the condition.
         */
        hdmiphy_conf_reset(hdata);
+       hdmiphy_poweroff(hdata);
 
        clk_disable(res->sclk_hdmi);
        clk_disable(res->hdmi);
        clk_disable(res->hdmiphy);
        regulator_bulk_disable(res->regul_count, res->regul_bulk);
 
-       pm_runtime_put_sync(hdata->dev);
-
        mutex_lock(&hdata->hdmi_mutex);
 
        hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
 {
        struct hdmi_context *hdata = ctx;
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
-               hdmi_poweron(hdata);
+               if (pm_runtime_suspended(hdata->dev))
+                       pm_runtime_get_sync(hdata->dev);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               hdmi_poweroff(hdata);
+               if (!pm_runtime_suspended(hdata->dev))
+                       pm_runtime_put_sync(hdata->dev);
                break;
        default:
                DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
        memset(res, 0, sizeof(*res));
 
        /* get clocks, power */
-       res->hdmi = clk_get(dev, "hdmi");
+       res->hdmi = devm_clk_get(dev, "hdmi");
        if (IS_ERR_OR_NULL(res->hdmi)) {
                DRM_ERROR("failed to get clock 'hdmi'\n");
                goto fail;
        }
-       res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
        if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
                DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
                goto fail;
        }
-       res->sclk_pixel = clk_get(dev, "sclk_pixel");
+       res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
        if (IS_ERR_OR_NULL(res->sclk_pixel)) {
                DRM_ERROR("failed to get clock 'sclk_pixel'\n");
                goto fail;
        }
-       res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+       res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
        if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
                DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
                goto fail;
        }
-       res->hdmiphy = clk_get(dev, "hdmiphy");
+       res->hdmiphy = devm_clk_get(dev, "hdmiphy");
        if (IS_ERR_OR_NULL(res->hdmiphy)) {
                DRM_ERROR("failed to get clock 'hdmiphy'\n");
                goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 
        clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
 
-       res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+       res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
                sizeof(res->regul_bulk[0]), GFP_KERNEL);
        if (!res->regul_bulk) {
                DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
                res->regul_bulk[i].supply = supply[i];
                res->regul_bulk[i].consumer = NULL;
        }
-       ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
        if (ret) {
                DRM_ERROR("failed to get regulators\n");
                goto fail;
@@ -2217,28 +2373,6 @@ fail:
        return -ENODEV;
 }
 
-static int hdmi_resources_cleanup(struct hdmi_context *hdata)
-{
-       struct hdmi_resources *res = &hdata->res;
-
-       regulator_bulk_free(res->regul_count, res->regul_bulk);
-       /* kfree is NULL-safe */
-       kfree(res->regul_bulk);
-       if (!IS_ERR_OR_NULL(res->hdmiphy))
-               clk_put(res->hdmiphy);
-       if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
-               clk_put(res->sclk_hdmiphy);
-       if (!IS_ERR_OR_NULL(res->sclk_pixel))
-               clk_put(res->sclk_pixel);
-       if (!IS_ERR_OR_NULL(res->sclk_hdmi))
-               clk_put(res->sclk_hdmi);
-       if (!IS_ERR_OR_NULL(res->hdmi))
-               clk_put(res->hdmi);
-       memset(res, 0, sizeof(*res));
-
-       return 0;
-}
-
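
The resource-acquisition calls above were switched to their devm_* managed counterparts (devm_clk_get(), devm_kzalloc(), devm_regulator_bulk_get()), so the driver core now releases everything automatically on probe failure and on unbind; that is what lets this hand-rolled cleanup function and the err_* unwind labels below be deleted. The general shape of the pattern, as a sketch:

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* lifetime is tied to the device: no clk_put() needed anywhere */
	clk = devm_clk_get(&pdev->dev, "foo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}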
 static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
 
 void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
        }
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmi_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 static int __devinit hdmi_probe(struct platform_device *pdev)
 {
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
                const struct of_device_id *match;
                match = of_match_node(of_match_ptr(hdmi_match_types),
                                        pdev->dev.of_node);
+               if (match == NULL)
+                       return -ENODEV;
                hdata->type = (enum hdmi_type)match->data;
        } else {
                hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
        ret = hdmi_resources_init(hdata);
 
        if (ret) {
-               ret = -EINVAL;
                DRM_ERROR("hdmi_resources_init failed\n");
-               goto err_data;
+               return -EINVAL;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                DRM_ERROR("failed to find registers\n");
-               ret = -ENOENT;
-               goto err_resource;
+               return -ENOENT;
        }
 
        hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
        if (!hdata->regs) {
                DRM_ERROR("failed to map registers\n");
-               ret = -ENXIO;
-               goto err_resource;
+               return -ENXIO;
        }
 
-       ret = gpio_request(hdata->hpd_gpio, "HPD");
+       ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
        if (ret) {
                DRM_ERROR("failed to request HPD gpio\n");
-               goto err_resource;
+               return ret;
        }
 
        /* DDC i2c driver */
        if (i2c_add_driver(&ddc_driver)) {
                DRM_ERROR("failed to register ddc i2c driver\n");
-               ret = -ENOENT;
-               goto err_gpio;
+               return -ENOENT;
        }
 
        hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
        i2c_del_driver(&hdmiphy_driver);
 err_ddc:
        i2c_del_driver(&ddc_driver);
-err_gpio:
-       gpio_free(hdata->hpd_gpio);
-err_resource:
-       hdmi_resources_cleanup(hdata);
-err_data:
        return ret;
 }
 
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
        free_irq(hdata->internal_irq, hdata);
        free_irq(hdata->external_irq, hdata);
 
-       gpio_free(hdata->hpd_gpio);
-
-       hdmi_resources_cleanup(hdata);
 
        /* hdmiphy i2c driver */
        i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
        struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
        struct hdmi_context *hdata = ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
        disable_irq(hdata->internal_irq);
        disable_irq(hdata->external_irq);
 
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
        if (ctx->drm_dev)
                drm_helper_hpd_irq_event(ctx->drm_dev);
 
+       if (pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+               return 0;
+       }
+
        hdmi_poweroff(hdata);
 
        return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
        struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
        struct hdmi_context *hdata = ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdata->hpd = gpio_get_value(hdata->hpd_gpio);
+
        enable_irq(hdata->external_irq);
        enable_irq(hdata->internal_irq);
+
+       if (!pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+               return 0;
+       }
+
+       hdmi_poweron(hdata);
+
        return 0;
 }
 #endif
 
-static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int hdmi_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = ctx->ctx;
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_poweroff(hdata);
+
+       return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+       struct hdmi_context *hdata = ctx->ctx;
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       hdmi_poweron(hdata);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
+       SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
+};
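
SIMPLE_DEV_PM_OPS() could only express system sleep callbacks, hence the switch to an open-coded dev_pm_ops here. Both helper macros expand to nothing when the corresponding CONFIG_PM_* option is off, which is why the callbacks above sit under #ifdef guards; simplified from include/linux/pm.h of this era:

#ifdef CONFIG_PM_RUNTIME
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
	.runtime_suspend = suspend_fn, \
	.runtime_resume = resume_fn, \
	.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif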
 
 struct platform_driver hdmi_driver = {
        .probe          = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
                .name   = "exynos-hdmi",
                .owner  = THIS_MODULE,
                .pm     = &hdmi_pm_ops,
-               .of_match_table = hdmi_match_types,
+               .of_match_table = of_match_ptr(hdmi_match_types),
        },
 };
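
Wrapping the table in of_match_ptr() pairs with the new #ifdef CONFIG_OF guard around its definition: with CONFIG_OF unset the table is compiled out and the macro substitutes NULL, so .of_match_table never references a missing symbol. From <linux/of.h>, roughly:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif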
index 27d1720..6206056 100644 (file)
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
        { },
 };
 
+#ifdef CONFIG_OF
 static struct of_device_id hdmiphy_match_types[] = {
        {
                .compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
                /* end node */
        }
 };
+#endif
 
 struct i2c_driver hdmiphy_driver = {
        .driver = {
                .name   = "exynos-hdmiphy",
                .owner  = THIS_MODULE,
-               .of_match_table = hdmiphy_match_types,
+               .of_match_table = of_match_ptr(hdmiphy_match_types),
        },
        .id_table = hdmiphy_id,
        .probe          = hdmiphy_probe,
index e7fbb82..21db895 100644 (file)
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
 
 #define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
 
 struct hdmi_win_data {
        dma_addr_t              dma_addr;
-       void __iomem            *vaddr;
        dma_addr_t              chroma_dma_addr;
-       void __iomem            *chroma_vaddr;
        uint32_t                pixel_format;
        unsigned int            bpp;
        unsigned int            crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
        unsigned int            mode_width;
        unsigned int            mode_height;
        unsigned int            scan_flags;
+       bool                    enabled;
+       bool                    resume;
 };
 
 struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
 
 struct mixer_context {
        struct device           *dev;
+       struct drm_device       *drm_dev;
        int                     pipe;
        bool                    interlace;
        bool                    powered;
@@ -90,6 +92,9 @@ struct mixer_context {
        struct mixer_resources  mixer_res;
        struct hdmi_win_data    win_data[MIXER_WIN_NR];
        enum mixer_version_id   mxr_ver;
+       void                    *parent_ctx;
+       wait_queue_head_t       wait_vsync_queue;
+       atomic_t                wait_vsync_event;
 };
 
 struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
        spin_unlock_irqrestore(&res->reg_slock, flags);
 }
 
-static void mixer_poweron(struct mixer_context *ctx)
-{
-       struct mixer_resources *res = &ctx->mixer_res;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       mutex_lock(&ctx->mixer_mutex);
-       if (ctx->powered) {
-               mutex_unlock(&ctx->mixer_mutex);
-               return;
-       }
-       ctx->powered = true;
-       mutex_unlock(&ctx->mixer_mutex);
-
-       pm_runtime_get_sync(ctx->dev);
-
-       clk_enable(res->mixer);
-       if (ctx->vp_enabled) {
-               clk_enable(res->vp);
-               clk_enable(res->sclk_mixer);
-       }
-
-       mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
-       mixer_win_reset(ctx);
-}
-
-static void mixer_poweroff(struct mixer_context *ctx)
+static int mixer_iommu_on(void *ctx, bool enable)
 {
-       struct mixer_resources *res = &ctx->mixer_res;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+       struct mixer_context *mdata = ctx;
+       struct drm_device *drm_dev;
 
-       mutex_lock(&ctx->mixer_mutex);
-       if (!ctx->powered)
-               goto out;
-       mutex_unlock(&ctx->mixer_mutex);
+       drm_hdmi_ctx = mdata->parent_ctx;
+       drm_dev = drm_hdmi_ctx->drm_dev;
 
-       ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+       if (is_drm_iommu_supported(drm_dev)) {
+               if (enable)
+                       return drm_iommu_attach_device(drm_dev, mdata->dev);
 
-       clk_disable(res->mixer);
-       if (ctx->vp_enabled) {
-               clk_disable(res->vp);
-               clk_disable(res->sclk_mixer);
+               drm_iommu_detach_device(drm_dev, mdata->dev);
        }
-
-       pm_runtime_put_sync(ctx->dev);
-
-       mutex_lock(&ctx->mixer_mutex);
-       ctx->powered = false;
-
-out:
-       mutex_unlock(&ctx->mixer_mutex);
+       return 0;
 }
 
 static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
-static void mixer_dpms(void *ctx, int mode)
-{
-       struct mixer_context *mixer_ctx = ctx;
-
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-               mixer_poweron(mixer_ctx);
-               break;
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-               mixer_poweroff(mixer_ctx);
-               break;
-       default:
-               DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-               break;
-       }
-}
-
-static void mixer_wait_for_vblank(void *ctx)
-{
-       struct mixer_context *mixer_ctx = ctx;
-       struct mixer_resources *res = &mixer_ctx->mixer_res;
-       int ret;
-
-       ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
-                               MXR_INT_STATUS_VSYNC), 50);
-       if (ret < 0)
-               DRM_DEBUG_KMS("vblank wait timed out.\n");
-}
-
 static void mixer_win_mode_set(void *ctx,
                              struct exynos_drm_overlay *overlay)
 {
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
        win_data = &mixer_ctx->win_data[win];
 
        win_data->dma_addr = overlay->dma_addr[0];
-       win_data->vaddr = overlay->vaddr[0];
        win_data->chroma_dma_addr = overlay->dma_addr[1];
-       win_data->chroma_vaddr = overlay->vaddr[1];
        win_data->pixel_format = overlay->pixel_format;
        win_data->bpp = overlay->bpp;
 
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
                vp_video_buffer(mixer_ctx, win);
        else
                mixer_graph_buffer(mixer_ctx, win);
+
+       mixer_ctx->win_data[win].enabled = true;
 }
 
 static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
 
        DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
 
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               mixer_ctx->win_data[win].resume = false;
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
        spin_lock_irqsave(&res->reg_slock, flags);
        mixer_vsync_set_update(mixer_ctx, false);
 
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
 
        mixer_vsync_set_update(mixer_ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       mixer_ctx->win_data[win].enabled = false;
+}
+
+static void mixer_wait_for_vblank(void *ctx)
+{
+       struct mixer_context *mixer_ctx = ctx;
+
+       mutex_lock(&mixer_ctx->mixer_mutex);
+       if (!mixer_ctx->powered) {
+               mutex_unlock(&mixer_ctx->mixer_mutex);
+               return;
+       }
+       mutex_unlock(&mixer_ctx->mixer_mutex);
+
+       atomic_set(&mixer_ctx->wait_vsync_event, 1);
+
+       /*
+        * wait for MIXER to signal the VSYNC interrupt, or return after
+        * a 50ms timeout (one frame period at a 20Hz refresh rate).
+        */
+       if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
+                               !atomic_read(&mixer_ctx->wait_vsync_event),
+                               DRM_HZ/20))
+               DRM_DEBUG_KMS("vblank wait timed out.\n");
+}
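
This replaces the old register-polling wait_for() loop with a wait queue: the waiter raises wait_vsync_event and sleeps, and the mixer IRQ handler (extended later in this patch) clears the flag and wakes the queue on each VSYNC. The two halves of the handshake, as a minimal standalone sketch:

static DECLARE_WAIT_QUEUE_HEAD(vsync_queue);
static atomic_t vsync_event = ATOMIC_INIT(0);

static void wait_one_vsync(void)
{
	atomic_set(&vsync_event, 1);
	/* sleep until the IRQ side clears the flag, or ~50ms elapses */
	if (!wait_event_timeout(vsync_queue, !atomic_read(&vsync_event), HZ / 20))
		pr_debug("vsync wait timed out\n");
}

static void on_vsync_irq(void)	/* called from the interrupt handler */
{
	if (atomic_read(&vsync_event)) {
		atomic_set(&vsync_event, 0);
		wake_up(&vsync_queue);
	}
}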
+
+static void mixer_window_suspend(struct mixer_context *ctx)
+{
+       struct hdmi_win_data *win_data;
+       int i;
+
+       for (i = 0; i < MIXER_WIN_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->resume = win_data->enabled;
+               mixer_win_disable(ctx, i);
+       }
+       mixer_wait_for_vblank(ctx);
+}
+
+static void mixer_window_resume(struct mixer_context *ctx)
+{
+       struct hdmi_win_data *win_data;
+       int i;
+
+       for (i = 0; i < MIXER_WIN_NR; i++) {
+               win_data = &ctx->win_data[i];
+               win_data->enabled = win_data->resume;
+               win_data->resume = false;
+       }
+}
+
+static void mixer_poweron(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mutex_lock(&ctx->mixer_mutex);
+       if (ctx->powered) {
+               mutex_unlock(&ctx->mixer_mutex);
+               return;
+       }
+       ctx->powered = true;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       clk_enable(res->mixer);
+       if (ctx->vp_enabled) {
+               clk_enable(res->vp);
+               clk_enable(res->sclk_mixer);
+       }
+
+       mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
+       mixer_win_reset(ctx);
+
+       mixer_window_resume(ctx);
+}
+
+static void mixer_poweroff(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mutex_lock(&ctx->mixer_mutex);
+       if (!ctx->powered)
+               goto out;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       mixer_window_suspend(ctx);
+
+       ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
+
+       clk_disable(res->mixer);
+       if (ctx->vp_enabled) {
+               clk_disable(res->vp);
+               clk_disable(res->sclk_mixer);
+       }
+
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = false;
+
+out:
+       mutex_unlock(&ctx->mixer_mutex);
+}
+
+static void mixer_dpms(void *ctx, int mode)
+{
+       struct mixer_context *mixer_ctx = ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_ON:
+               if (pm_runtime_suspended(mixer_ctx->dev))
+                       pm_runtime_get_sync(mixer_ctx->dev);
+               break;
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+       case DRM_MODE_DPMS_OFF:
+               if (!pm_runtime_suspended(mixer_ctx->dev))
+                       pm_runtime_put_sync(mixer_ctx->dev);
+               break;
+       default:
+               DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+               break;
+       }
 }
 
 static struct exynos_mixer_ops mixer_ops = {
        /* manager */
+       .iommu_on               = mixer_iommu_on,
        .enable_vblank          = mixer_enable_vblank,
        .disable_vblank         = mixer_disable_vblank,
+       .wait_for_vblank        = mixer_wait_for_vblank,
        .dpms                   = mixer_dpms,
 
        /* overlay */
-       .wait_for_vblank        = mixer_wait_for_vblank,
        .win_mode_set           = mixer_win_mode_set,
        .win_commit             = mixer_win_commit,
        .win_disable            = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
        struct drm_pending_vblank_event *e, *t;
        struct timeval now;
        unsigned long flags;
-       bool is_checked = false;
 
        spin_lock_irqsave(&drm_dev->event_lock, flags);
 
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
                if (crtc != e->pipe)
                        continue;
 
-               is_checked = true;
                do_gettimeofday(&now);
                e->event.sequence = 0;
                e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
 
                list_move_tail(&e->base.link, &e->base.file_priv->event_list);
                wake_up_interruptible(&e->base.file_priv->event_wait);
+               drm_vblank_put(drm_dev, crtc);
        }
 
-       if (is_checked)
-               /*
-                * call drm_vblank_put only in case that drm_vblank_get was
-                * called.
-                */
-               if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
-                       drm_vblank_put(drm_dev, crtc);
-
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);
 }
 
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
                drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
                mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+
+               /* set wait vsync event to zero and wake up queue. */
+               if (atomic_read(&ctx->wait_vsync_event)) {
+                       atomic_set(&ctx->wait_vsync_event, 0);
+                       DRM_WAKEUP(&ctx->wait_vsync_queue);
+               }
        }
 
 out:
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
 
        spin_lock_init(&mixer_res->reg_slock);
 
-       mixer_res->mixer = clk_get(dev, "mixer");
+       mixer_res->mixer = devm_clk_get(dev, "mixer");
        if (IS_ERR_OR_NULL(mixer_res->mixer)) {
                dev_err(dev, "failed to get clock 'mixer'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
 
-       mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+       mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
        if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
                dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(dev, "get memory resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
                                                        resource_size(res));
        if (mixer_res->mixer_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (res == NULL) {
                dev_err(dev, "get interrupt resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
                                                        0, "drm_mixer", ctx);
        if (ret) {
                dev_err(dev, "request interrupt failed.\n");
-               goto fail;
+               return ret;
        }
        mixer_res->irq = res->start;
 
        return 0;
-
-fail:
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
-               clk_put(mixer_res->sclk_hdmi);
-       if (!IS_ERR_OR_NULL(mixer_res->mixer))
-               clk_put(mixer_res->mixer);
-       return ret;
 }
 
 static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
        struct device *dev = &pdev->dev;
        struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
        struct resource *res;
-       int ret;
 
-       mixer_res->vp = clk_get(dev, "vp");
+       mixer_res->vp = devm_clk_get(dev, "vp");
        if (IS_ERR_OR_NULL(mixer_res->vp)) {
                dev_err(dev, "failed to get clock 'vp'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
-       mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+       mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
        if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
                dev_err(dev, "failed to get clock 'sclk_mixer'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
-       mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+       mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
        if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
                dev_err(dev, "failed to get clock 'sclk_dac'\n");
-               ret = -ENODEV;
-               goto fail;
+               return -ENODEV;
        }
 
        if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res == NULL) {
                dev_err(dev, "get memory resource failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
                                                        resource_size(res));
        if (mixer_res->vp_regs == NULL) {
                dev_err(dev, "register mapping failed.\n");
-               ret = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
 
        return 0;
-
-fail:
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
-               clk_put(mixer_res->sclk_dac);
-       if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
-               clk_put(mixer_res->sclk_mixer);
-       if (!IS_ERR_OR_NULL(mixer_res->vp))
-               clk_put(mixer_res->vp);
-       return ret;
 }
 
 static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
        }
 
        ctx->dev = &pdev->dev;
+       ctx->parent_ctx = (void *)drm_hdmi_ctx;
        drm_hdmi_ctx->ctx = (void *)ctx;
        ctx->vp_enabled = drv->is_vp_enabled;
        ctx->mxr_ver = drv->version;
+       DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
+       atomic_set(&ctx->wait_vsync_event, 0);
 
        platform_set_drvdata(pdev, drm_hdmi_ctx);
 
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
        struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
        struct mixer_context *ctx = drm_hdmi_ctx->ctx;
 
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
+               return 0;
+       }
+
        mixer_poweroff(ctx);
 
        return 0;
 }
+
+static int mixer_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       if (!pm_runtime_suspended(dev)) {
+               DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
+               return 0;
+       }
+
+       mixer_poweron(ctx);
+
+       return 0;
+}
 #endif
 
-static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL);
+#ifdef CONFIG_PM_RUNTIME
+static int mixer_runtime_suspend(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mixer_poweroff(ctx);
+
+       return 0;
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+       struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
+       struct mixer_context *ctx = drm_hdmi_ctx->ctx;
+
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+       mixer_poweron(ctx);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops mixer_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
+       SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
+};
 
 struct platform_driver mixer_driver = {
        .driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644 (file)
index 0000000..b4f9ca1
--- /dev/null
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_FIMC_H
+#define EXYNOS_REGS_FIMC_H
+
+/*
+ * Register part
+ */
+/* Input source format */
+#define EXYNOS_CISRCFMT                (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST                (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL         (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2       (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1         (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2         (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3         (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4         (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1                (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2                (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3                (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4                (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1                (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2                (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3                (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4                (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT                (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL         (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO    (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST              (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL                (0x58)
+/* Target area */
+#define EXYNOS_CITAREA         (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS                (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2               (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT                (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ                (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF                (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0         (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0                (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0                (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y   (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB  (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR  (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE    (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL          (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1         (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1                (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1                (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF         (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF                (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF                (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF         (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF                (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF                (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE                (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE                (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN         (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM              (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT               (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC               (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ               (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5         (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6         (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7         (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8         (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9         (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10                (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11                (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12                (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13                (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14                (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15                (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16                (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17                (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18                (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19                (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20                (0x23c)
+/* Y 21st frame start address for output DMA */
+#define EXYNOS_CIOYSA21                (0x240)
+/* Y 22nd frame start address for output DMA */
+#define EXYNOS_CIOYSA22                (0x244)
+/* Y 23rd frame start address for output DMA */
+#define EXYNOS_CIOYSA23                (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24                (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25                (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26                (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27                (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28                (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29                (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30                (0x264)
+/* Y 31st frame start address for output DMA */
+#define EXYNOS_CIOYSA31                (0x268)
+/* Y 32nd frame start address for output DMA */
+#define EXYNOS_CIOYSA32                (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5                (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6                (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7                (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8                (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9                (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10               (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11               (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12               (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13               (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14               (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15               (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16               (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17               (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18               (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19               (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20               (0x2ac)
+/* CB 21st frame start address for output DMA */
+#define EXYNOS_CIOCBSA21               (0x2b0)
+/* CB 22nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA22               (0x2b4)
+/* CB 23rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA23               (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24               (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25               (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26               (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27               (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28               (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29               (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30               (0x2d4)
+/* CB 31st frame start address for output DMA */
+#define EXYNOS_CIOCBSA31               (0x2d8)
+/* CB 32nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA32               (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5                (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6                (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7                (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8                (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9                (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10               (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11               (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12               (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13               (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14               (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15               (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16               (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17               (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18               (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19               (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20               (0x31c)
+/* CR 21st frame start address for output DMA */
+#define EXYNOS_CIOCRSA21               (0x320)
+/* CR 22nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA22               (0x324)
+/* CR 23rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA23               (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24               (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25               (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26               (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27               (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28               (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29               (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30               (0x344)
+/* CR 31st frame start address for output DMA */
+#define EXYNOS_CIOCRSA31               (0x348)
+/* CR 32nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA32               (0x34c)
+
+/*
+ * Macro part
+ */
+/* frame start addresses 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP         4
+#define EXYNOS_CIOYSA(__x)             \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOYSA1  + (__x) * 4) : \
+       (EXYNOS_CIOYSA5  + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x)    \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+       (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x)    \
+       (((__x) < DEF_PP) ?     \
+        (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+       (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default Input PingPong Memory */
+#define DEF_IPP                1
+#define EXYNOS_CIIYSA(__x)             \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x)    \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x)    \
+       (((__x) < DEF_IPP) ?    \
+        (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
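
Buffer indices passed to these macros are zero-based: the first DEF_PP (4) output buffers map onto the original CIOYSA1..4 registers, and indices 4 and up fall through to the v5.1 block starting at 0x200. Two worked examples of the Y-plane mapping:

/* EXYNOS_CIOYSA(2) = EXYNOS_CIOYSA1 + 2 * 4            = 0x18  + 0x8 = 0x20  (CIOYSA3) */
/* EXYNOS_CIOYSA(6) = EXYNOS_CIOYSA5 + (6 - DEF_PP) * 4 = 0x200 + 0x8 = 0x208 (CIOYSA7) */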
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x)         ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x)         ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x)          ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x)          ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x)                ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x)                ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x)         (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x)         (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x)                ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x)             ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x)             ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x)               ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x)              ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x)                ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x)                ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x)          ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x)             (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x)               (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x)        (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x)              (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x)    (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x)      (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x)     ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)                 (((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x)                      ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x)                      ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x)                  (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x)          ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x)           ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x)              ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x)              ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x)                     ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x)           ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x)                     ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x)           ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x)            ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x)          ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x)          ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x)          ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x)             (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x)             ((x) & 0x3F)
+
+/*
+ * Bit definition part
+ */
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT            (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT            (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT           (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR                (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB                (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY                (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY                (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR    (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB    (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN                       (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY                       (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB                       (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK                (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB                      (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR                      (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK                (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST                   (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A                        (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B            (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A            (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK         (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL              (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR   (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC             (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC             (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK                (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT               (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK                      (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC                     (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF                      (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN                       (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK                       (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE                        (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL                       (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR                 (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE         (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE                     (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE                      (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE          (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG                        (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B           (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A           (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK                (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA      (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK   (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK               (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A          (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B          (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK                (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601                      (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709                      (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK                        (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC                     (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU         (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI                (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK                (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE                     (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE                       (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK               (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK               (0xfff << 16)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE              (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420             (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422             (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE      (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB          (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK         (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT                     (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL            (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR          (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR          (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180                       (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK                      (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE             (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK           (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK           (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT                       (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK                      (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN                       (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR                (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB                (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB                (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR                (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT           (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK            (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE            (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE            (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK                (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE          (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT                       (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR         (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB         (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY         (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY         (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK           (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS           (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H                      (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V                      (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW          (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE            (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW          (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE            (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO         (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE            (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE                      (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK                      (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART            (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565               (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666               (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888               (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK             (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565              (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666              (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888              (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK    (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL          (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION               (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE                        (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK              (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK              (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY                  (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB                 (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR                 (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC                  (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART            (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN                      (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN                       (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC                     (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A                        (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B                        (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB                  (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND                       (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND         (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A                       (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B                       (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN                       (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC            (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE                (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN           (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT          (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE                     (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE                      (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE           (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER            (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS                     (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY          (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE           (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE          (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING          (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE         (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK                       (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK          ((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE    (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE    (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK                (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK         (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK                       (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE                      (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL                     (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT                        (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK           (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR         (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB         (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB         (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR         (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT            (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK               (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE          (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE          (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT                       (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL                      (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR            (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR            (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180                 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK                        (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY          (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB          (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY          (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR          (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM                     (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY                     (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK                       (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420                (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422                (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB                     (3 << 1)
+#define EXYNOS_MSCTRL_ENVID                    (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR                (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE              (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16         (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32         (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK          (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64              (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128             (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256             (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512             (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024    (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048    (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096    (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1               (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2               (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4               (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8               (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16              (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32              (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR                (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE              (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16         (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32         (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK          (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64              (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128             (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256             (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512             (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024    (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048    (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096    (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1               (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2               (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4               (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8               (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16              (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32              (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK                (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK                (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK   (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK   (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT                      (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK                             (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK                        (1 << 1)
+#define EXYNOS_CLKSRC_SCLK                             (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK                      (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK                         (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK       (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT      23
+
+#endif /* EXYNOS_REGS_FIMC_H */
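
These FIMC defines follow the usual kernel mask/shift idiom: each multi-bit field gets a _MASK covering its bit range plus one macro per encoded value, and the driver programs a field with a read-modify-write. A minimal sketch of the pattern, assuming a hypothetical "regs" iomem base and an EXYNOS_CIGCTRL register offset (the offset macros live elsewhere in the driver; only the bitfield macros come from this header):

	/* Hedged sketch: select the horizontal-increment test pattern.
	 * EXYNOS_CIGCTRL (the register offset) and "regs" are assumptions;
	 * the TESTPATTERN macros are the ones defined above.
	 */
	u32 cfg = readl(regs + EXYNOS_CIGCTRL);

	cfg &= ~EXYNOS_CIGCTRL_TESTPATTERN_MASK;	/* clear bits 28:27 */
	cfg |= EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC;	/* write 2 << 27 */
	writel(cfg, regs + EXYNOS_CIGCTRL);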
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644 (file)
index 0000000..9ad5927
--- /dev/null
@@ -0,0 +1,284 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_GSC_H_
+#define EXYNOS_REGS_GSC_H_
+
+/* G-Scaler enable */
+#define GSC_ENABLE                     0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS        (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK  (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE  (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK       (1 << 7)
+#define GSC_ENABLE_NORM_MODE           (0 << 7)
+#define GSC_ENABLE_IPC_MODE            (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE                (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK       (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT    (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE          (1 << 3)
+#define GSC_ENABLE_OP_STATUS           (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE          (1 << 1)
+#define GSC_ENABLE_ON                  (1 << 0)
+
+/* G-Scaler S/W reset */
+#define GSC_SW_RESET                   0x04
+#define GSC_SW_RESET_SRESET            (1 << 0)
+
+/* G-Scaler IRQ */
+#define GSC_IRQ                                0x08
+#define GSC_IRQ_STATUS_OR_IRQ          (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE     (1 << 16)
+#define GSC_IRQ_OR_MASK                        (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK           (1 << 1)
+#define GSC_IRQ_ENABLE                 (1 << 0)
+
+/* G-Scaler input control */
+#define GSC_IN_CON                     0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK   (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR      (1 << 20)
+#define GSC_IN_RB_SWAP_MASK            (1 << 19)
+#define GSC_IN_RB_SWAP                 (1 << 19)
+#define GSC_IN_ROT_MASK                        (7 << 16)
+#define GSC_IN_ROT_270                 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP            (6 << 16)
+#define GSC_IN_ROT_90_XFLIP            (5 << 16)
+#define GSC_IN_ROT_90                  (4 << 16)
+#define GSC_IN_ROT_180                 (3 << 16)
+#define GSC_IN_ROT_YFLIP               (2 << 16)
+#define GSC_IN_ROT_XFLIP               (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK           (3 << 14)
+#define GSC_IN_RGB_HD_WIDE             (3 << 14)
+#define GSC_IN_RGB_HD_NARROW           (2 << 14)
+#define GSC_IN_RGB_SD_WIDE             (1 << 14)
+#define GSC_IN_RGB_SD_NARROW           (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK    (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y   (0 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_C   (1 << 13)
+#define GSC_IN_CHROMA_ORDER_MASK       (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR       (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB       (1 << 12)
+#define GSC_IN_FORMAT_MASK             (7 << 8)
+#define GSC_IN_XRGB8888                        (0 << 8)
+#define GSC_IN_RGB565                  (1 << 8)
+#define GSC_IN_YUV420_2P               (2 << 8)
+#define GSC_IN_YUV420_3P               (3 << 8)
+#define GSC_IN_YUV422_1P               (4 << 8)
+#define GSC_IN_YUV422_2P               (5 << 8)
+#define GSC_IN_YUV422_3P               (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK          (1 << 4)
+#define GSC_IN_TILE_C_16x8             (0 << 4)
+#define GSC_IN_TILE_C_16x16            (1 << 4)
+#define GSC_IN_TILE_MODE               (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK          (3 << 1)
+#define GSC_IN_LOCAL_CAM3              (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB           (2 << 1)
+#define GSC_IN_LOCAL_CAM1              (1 << 1)
+#define GSC_IN_LOCAL_CAM0              (0 << 1)
+#define GSC_IN_PATH_MASK               (1 << 0)
+#define GSC_IN_PATH_LOCAL              (1 << 0)
+#define GSC_IN_PATH_MEMORY             (0 << 0)
+
+/* G-Scaler source image size */
+#define GSC_SRCIMG_SIZE                        0x14
+#define GSC_SRCIMG_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x)           ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK          (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler source image offset */
+#define GSC_SRCIMG_OFFSET              0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK       (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x)         ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK       (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x)         ((x) << 0)
+
+/* G-Scaler cropped source image size */
+#define GSC_CROPPED_SIZE               0x1C
+#define GSC_CROPPED_HEIGHT_MASK                (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x)          ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK         (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x)           ((x) << 0)
+
+/* G-Scaler output control */
+#define GSC_OUT_CON                    0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK      (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x)                ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK  (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR     (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK           (1 << 12)
+#define GSC_OUT_RB_SWAP                        (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK          (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW          (3 << 10)
+#define GSC_OUT_RGB_HD_WIDE            (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW          (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE            (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK   (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y  (0 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_C  (1 << 9)
+#define GSC_OUT_CHROMA_ORDER_MASK      (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR      (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB      (1 << 8)
+#define GSC_OUT_FORMAT_MASK            (7 << 4)
+#define GSC_OUT_XRGB8888               (0 << 4)
+#define GSC_OUT_RGB565                 (1 << 4)
+#define GSC_OUT_YUV420_2P              (2 << 4)
+#define GSC_OUT_YUV420_3P              (3 << 4)
+#define GSC_OUT_YUV422_1P              (4 << 4)
+#define GSC_OUT_YUV422_2P              (5 << 4)
+#define GSC_OUT_YUV444                 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK         (1 << 2)
+#define GSC_OUT_TILE_C_16x8            (0 << 2)
+#define GSC_OUT_TILE_C_16x16           (1 << 2)
+#define GSC_OUT_TILE_MODE              (1 << 1)
+#define GSC_OUT_PATH_MASK              (1 << 0)
+#define GSC_OUT_PATH_LOCAL             (1 << 0)
+#define GSC_OUT_PATH_MEMORY            (0 << 0)
+
+/* G-Scaler scaled destination image size */
+#define GSC_SCALED_SIZE                        0x24
+#define GSC_SCALED_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x)           ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK          (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler pre scale ratio */
+#define GSC_PRE_SCALE_RATIO            0x28
+#define GSC_PRESC_SHFACTOR_MASK                (7 << 28)
+#define GSC_PRESC_SHFACTOR(x)          ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK         (7 << 16)
+#define GSC_PRESC_V_RATIO(x)           ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK         (7 << 0)
+#define GSC_PRESC_H_RATIO(x)           ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio */
+#define GSC_MAIN_H_RATIO               0x2C
+#define GSC_MAIN_H_RATIO_MASK          (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x)      ((x) << 0)
+
+/* G-Scaler main scale vertical ratio */
+#define GSC_MAIN_V_RATIO               0x30
+#define GSC_MAIN_V_RATIO_MASK          (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x)      ((x) << 0)
+
+/* G-Scaler input chrominance stride */
+#define GSC_IN_CHROM_STRIDE            0x3C
+#define GSC_IN_CHROM_STRIDE_MASK       (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x)   ((x) << 0)
+
+/* G-Scaler destination image size */
+#define GSC_DSTIMG_SIZE                        0x40
+#define GSC_DSTIMG_HEIGHT_MASK         (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x)           ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK          (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x)            ((x) << 0)
+
+/* G-Scaler destination image offset */
+#define GSC_DSTIMG_OFFSET              0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK       (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x)         ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK       (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x)         ((x) << 0)
+
+/* G-Scaler output chrominance stride */
+#define GSC_OUT_CHROM_STRIDE           0x48
+#define GSC_OUT_CHROM_STRIDE_MASK      (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x)  ((x) << 0)
+
+/* G-Scaler input y address mask */
+#define GSC_IN_BASE_ADDR_Y_MASK                0x4C
+/* G-Scaler input y base address */
+#define GSC_IN_BASE_ADDR_Y(n)          (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n)      (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask */
+#define GSC_IN_BASE_ADDR_CB_MASK       0x7C
+/* G-Scaler input cb base address */
+#define GSC_IN_BASE_ADDR_CB(n)         (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n)     (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask */
+#define GSC_IN_BASE_ADDR_CR_MASK       0xAC
+/* G-Scaler input cr base address */
+#define GSC_IN_BASE_ADDR_CR(n)         (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n)     (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x)       ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x)   ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK          (0xff << 0)
+
+/* G-Scaler output y address mask */
+#define GSC_OUT_BASE_ADDR_Y_MASK       0x10C
+/* G-Scaler output y base address */
+#define GSC_OUT_BASE_ADDR_Y(n)         (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask */
+#define GSC_OUT_BASE_ADDR_CB_MASK      0x15C
+/* G-Scaler output cb base address */
+#define GSC_OUT_BASE_ADDR_CB(n)                (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask */
+#define GSC_OUT_BASE_ADDR_CR_MASK      0x1AC
+/* G-Scaler output cr base address */
+#define GSC_OUT_BASE_ADDR_CR(n)                (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask */
+#define GSC_OUT_CURR_ADDR_INDEX                (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x)      ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x)  ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK         (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter */
+#define GSC_HCOEF(n, s, x)     (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter */
+#define GSC_VCOEF(n, s, x)     (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control */
+#define GSC_BUSCON                     0xA78
+#define GSC_BUSCON_INT_TIME_MASK       (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS      (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE    (1 << 8)
+#define GSC_BUSCON_AWCACHE(x)          ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x)          ((x) << 0)
+
+/* G-Scaler V position */
+#define GSC_VPOSITION                  0xA7C
+#define GSC_VPOS_F(x)                  ((x) << 0)
+
+
+/* G-Scaler clock initial count */
+#define GSC_CLK_INIT_COUNT             0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x)  ((x) << 0)
+
+/* G-Scaler clock snoop count */
+#define GSC_CLK_SNOOP_COUNT            0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+/* SYSCON. GSCBLK_CFG */
+#define SYSREG_GSCBLK_CFG1             (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x)                ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x)    (1 << (18 + (x)))
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)  (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2             (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+#endif /* EXYNOS_REGS_GSC_H_ */
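
Note that the GSC input-control register packs rotation and both flips into the single 3-bit field at bits 18:16, so a caller never ORs GSC_IN_ROT_* values together; it clears the field with GSC_IN_ROT_MASK and writes exactly one encoded value. A sketch under that assumption, with gsc_read()/gsc_write() standing in for the driver's register accessors:

	/* Hedged sketch: set 90-degree rotation combined with an X flip.
	 * gsc_read()/gsc_write() are assumed accessor stand-ins; the
	 * GSC_IN_* macros are from the header above.
	 */
	u32 cfg = gsc_read(GSC_IN_CON);

	cfg &= ~GSC_IN_ROT_MASK;	/* rotation and flips share bits 18:16 */
	cfg |= GSC_IN_ROT_90_XFLIP;	/* one encoded value, not ORed flags */
	gsc_write(cfg, GSC_IN_CON);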
index 9cc7c5e..ef1b3eb 100644 (file)
 #define HDMI_PHY_CMU                   HDMI_CTRL_BASE(0x007C)
 #define HDMI_CORE_RSTOUT               HDMI_CTRL_BASE(0x0080)
 
+/* PHY Control bit definition */
+
+/* HDMI_PHY_CON_0 */
+#define HDMI_PHY_POWER_OFF_EN          (1 << 0)
+
 /* Video related registers */
 #define HDMI_YMAX                      HDMI_CORE_BASE(0x0060)
 #define HDMI_YMIN                      HDMI_CORE_BASE(0x0064)
 #define HDMI_AVI_HEADER1               HDMI_CORE_BASE(0x0714)
 #define HDMI_AVI_HEADER2               HDMI_CORE_BASE(0x0718)
 #define HDMI_AVI_CHECK_SUM             HDMI_CORE_BASE(0x071C)
-#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0720 + 4 * (n))
+#define HDMI_AVI_BYTE(n)               HDMI_CORE_BASE(0x0720 + 4 * (n-1))
 
 #define HDMI_AUI_CON                   HDMI_CORE_BASE(0x0800)
 #define HDMI_AUI_HEADER0               HDMI_CORE_BASE(0x0810)
 #define HDMI_AUI_HEADER1               HDMI_CORE_BASE(0x0814)
 #define HDMI_AUI_HEADER2               HDMI_CORE_BASE(0x0818)
 #define HDMI_AUI_CHECK_SUM             HDMI_CORE_BASE(0x081C)
-#define HDMI_AUI_BYTE(n)               HDMI_CORE_BASE(0x0820 + 4 * (n))
+#define HDMI_AUI_BYTE(n)               HDMI_CORE_BASE(0x0820 + 4 * (n-1))
 
 #define HDMI_MPG_CON                   HDMI_CORE_BASE(0x0900)
 #define HDMI_MPG_CHECK_SUM             HDMI_CORE_BASE(0x091C)
 #define HDMI_AN_SEED_2                 HDMI_CORE_BASE(0x0E60)
 #define HDMI_AN_SEED_3                 HDMI_CORE_BASE(0x0E64)
 
+/* AVI bit definition */
+#define HDMI_AVI_CON_DO_NOT_TRANSMIT   (0 << 1)
+#define HDMI_AVI_CON_EVERY_VSYNC       (1 << 1)
+
+#define AVI_ACTIVE_FORMAT_VALID        (1 << 4)
+#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
+
+/* AUI bit definition */
+#define HDMI_AUI_CON_NO_TRAN           (0 << 0)
+
+/* VSI bit definition */
+#define HDMI_VSI_CON_DO_NOT_TRANSMIT   (0 << 0)
+
 /* HDCP related registers */
 #define HDMI_HDCP_SHA1(n)              HDMI_CORE_BASE(0x7000 + 4 * (n))
 #define HDMI_HDCP_KSV_LIST(n)          HDMI_CORE_BASE(0x7050 + 4 * (n))
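
The HDMI_AVI_BYTE()/HDMI_AUI_BYTE() change above moves both macros from 0-based to 1-based indexing, matching the PB1..PBn byte numbering that HDMI infoframes use, so HDMI_AVI_BYTE(1) now maps to offset 0x0720. A hedged sketch of a caller filling the payload, where hdmi_reg_writeb() stands in for the driver's byte-register accessor and frame[] is a hypothetical 0-based payload buffer:

	/* PB1 lands at 0x0720, PB2 at 0x0724, and so on. */
	for (i = 1; i <= frame_len; i++)
		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(i), frame[i - 1]);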
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644 (file)
index 0000000..a09ac6e
--- /dev/null
@@ -0,0 +1,73 @@
+/* drivers/gpu/drm/exynos/regs-rotator.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * Register definition file for Samsung Rotator Interface (Rotator) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef EXYNOS_REGS_ROTATOR_H
+#define EXYNOS_REGS_ROTATOR_H
+
+/* Configuration */
+#define ROT_CONFIG                     0x00
+#define ROT_CONFIG_IRQ                 (3 << 8)
+
+/* Image Control */
+#define ROT_CONTROL                    0x10
+#define ROT_CONTROL_PATTERN_WRITE      (1 << 16)
+#define ROT_CONTROL_FMT_YCBCR420_2P    (1 << 8)
+#define ROT_CONTROL_FMT_RGB888         (6 << 8)
+#define ROT_CONTROL_FMT_MASK           (7 << 8)
+#define ROT_CONTROL_FLIP_VERTICAL      (2 << 6)
+#define ROT_CONTROL_FLIP_HORIZONTAL    (3 << 6)
+#define ROT_CONTROL_FLIP_MASK          (3 << 6)
+#define ROT_CONTROL_ROT_90             (1 << 4)
+#define ROT_CONTROL_ROT_180            (2 << 4)
+#define ROT_CONTROL_ROT_270            (3 << 4)
+#define ROT_CONTROL_ROT_MASK           (3 << 4)
+#define ROT_CONTROL_START              (1 << 0)
+
+/* Status */
+#define ROT_STATUS                     0x20
+#define ROT_STATUS_IRQ_PENDING(x)      (1 << (x))
+#define ROT_STATUS_IRQ(x)              (((x) >> 8) & 0x3)
+#define ROT_STATUS_IRQ_VAL_COMPLETE    1
+#define ROT_STATUS_IRQ_VAL_ILLEGAL     2
+
+/* Buffer Address */
+#define ROT_SRC_BUF_ADDR(n)            (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n)            (0x50 + ((n) << 2))
+
+/* Buffer Size */
+#define ROT_SRC_BUF_SIZE               0x3c
+#define ROT_DST_BUF_SIZE               0x5c
+#define ROT_SET_BUF_SIZE_H(x)          ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x)          ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x)          ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x)          ((x) & 0xffff)
+
+/* Crop Position */
+#define ROT_SRC_CROP_POS               0x40
+#define ROT_DST_CROP_POS               0x60
+#define ROT_CROP_POS_Y(x)              ((x) << 16)
+#define ROT_CROP_POS_X(x)              ((x) << 0)
+
+/* Source Crop Size */
+#define ROT_SRC_CROP_SIZE              0x44
+#define ROT_SRC_CROP_SIZE_H(x)         ((x) << 16)
+#define ROT_SRC_CROP_SIZE_W(x)         ((x) << 0)
+
+/* Round to nearest aligned value */
+#define ROT_ALIGN(x, align, mask)      (((x) + (1 << ((align) - 1))) & (mask))
+/* Minimum limit value */
+#define ROT_MIN(min, mask)             (((min) + ~(mask)) & (mask))
+/* Maximum limit value */
+#define ROT_MAX(max, mask)             ((max) & (mask))
+
+#endif /* EXYNOS_REGS_ROTATOR_H */
+
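
ROT_ALIGN() rounds to the nearest multiple of 1 << align rather than truncating: it adds half a step, 1 << (align - 1), before applying the mask. Two worked values, assuming 16-pixel alignment (align = 4, mask = ~0xf):

	/* ROT_ALIGN(13, 4, ~0xf) = (13 + 8) & ~0xf = 16  (13 rounds up)  */
	/* ROT_ALIGN(5, 4, ~0xf)  = (5 + 8) & ~0xf  = 0   (5 rounds down) */
	unsigned int aligned_w = ROT_ALIGN(width, 4, ~0xfu);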
index 1ceca3d..23e14e9 100644 (file)
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
 
                dev_priv->force_audio_property = prop;
        }
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
 
 
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
                dev_priv->broadcast_rgb_property = prop;
        }
 
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
 
 /* Cedarview */
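
The recurring change in the gma500 hunks that follow is the DRM 3.8 property-API rename: properties now hang off the generic struct drm_mode_object embedded in each connector, so the connector-specific helpers are replaced by the drm_object_* family operating on &connector->base. The mapping, taken directly from these hunks:

	/* old, connector-specific helpers */
	drm_connector_attach_property(connector, prop, init_val);
	drm_connector_property_get_value(connector, prop, &val);
	drm_connector_property_set_value(connector, prop, val);

	/* new, generic mode-object helpers */
	drm_object_attach_property(&connector->base, prop, init_val);
	drm_object_property_get_value(&connector->base, prop, &val);
	drm_object_property_set_value(&connector->base, prop, val);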
index e3a3978..51044cc 100644 (file)
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
        struct cdv_intel_dp *intel_dp = encoder->dev_priv;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
index 7272a46..e223b50 100644 (file)
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
                        return -1;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                        property, &curValue))
                        return -1;
 
                if (curValue == value)
                        return 0;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property, value))
                        return -1;
 
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
 
index b362dd3..d81dbc3 100644 (file)
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                        return -1;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                     property,
                                                     &curValue))
                        return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                if (curValue == value)
                        return 0;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
                                return -1;
                }
        } else if (!strcmp(property->name, "backlight") && encoder) {
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*Attach connector properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev_priv->backlight_property,
                                      BRIGHTNESS_MAX_LEVEL);
 
index 32dba2a..2d4ab48 100644 (file)
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
                        goto set_prop_error;
                }
 
-               if (drm_connector_property_get_value(connector, property, &val))
+               if (drm_object_property_get_value(&connector->base, property, &val))
                        goto set_prop_error;
 
                if (val == value)
                        goto set_prop_done;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property, value))
                        goto set_prop_error;
 
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
                        }
                }
        } else if (!strcmp(property->name, "backlight") && encoder) {
-               if (drm_connector_property_set_value(connector, property,
+               if (drm_object_property_set_value(&connector->base, property,
                                                                        value))
                        goto set_prop_error;
                else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
 
        dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
 
-       if (!dev || ((pipe != 0) && (pipe != 2))) {
+       if (pipe != 0 && pipe != 2) {
                DRM_ERROR("Invalid parameter\n");
                return;
        }
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*attach properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                dev->mode_config.scaling_mode_property,
                                DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                dev_priv->backlight_property,
                                MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
 
index dec6a9a..74485dc 100644 (file)
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
        REG_WRITE(map->pos, 0);
 
        if (psb_intel_encoder)
-               drm_connector_property_get_value(connector,
+               drm_object_property_get_value(&connector->base,
                        dev->mode_config.scaling_mode_property, &scalingType);
 
        if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
index f2f9f38..30adbbe 100644 (file)
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
 extern void oaktrail_hdmi_save(struct drm_device *dev);
 extern void oaktrail_hdmi_restore(struct drm_device *dev);
 extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+                                               struct drm_display_mode *adjusted_mode, int x, int y,
+                                               struct drm_framebuffer *old_fb);
+extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
+
+
index cdafd2a..3071526 100644 (file)
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
        const struct psb_offset *map = &dev_priv->regmap[pipe];
        u32 temp;
 
+       if (pipe == 1) {
+               oaktrail_crtc_hdmi_dpms(crtc, mode);
+               return;
+       }
+
        if (!gma_power_begin(dev, true))
                return;
 
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
        uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
        struct drm_connector *connector;
 
+       if (pipe == 1)
+               return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
+
        if (!gma_power_begin(dev, true))
                return 0;
 
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
                  (mode->crtc_vdisplay - 1));
 
        if (psb_intel_encoder)
-               drm_connector_property_get_value(connector,
+               drm_object_property_get_value(&connector->base,
                        dev->mode_config.scaling_mode_property, &scalingType);
 
        if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
index 010b820..08747fd 100644 (file)
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
        .accel_2d = 1,
        .pipes = 2,
        .crtcs = 2,
-       .hdmi_mask = (1 << 0),
+       .hdmi_mask = (1 << 1),
        .lvds_mask = (1 << 0),
        .cursor_needs_phys = 0,
        .sgx_offset = MRST_SGX_OFFSET,
index 69e51e9..f036f1f 100644 (file)
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
        HDMI_READ(HDMI_HCR);
 }
 
+static void wait_for_vblank(struct drm_device *dev)
+{
+       /* Wait for 20ms, i.e. one cycle at 50Hz. */
+       mdelay(20);
+}
+
+static unsigned int htotal_calculate(struct drm_display_mode *mode)
+{
+       u32 htotal, new_crtc_htotal;
+
+       htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+
+       /*
+        * 1024 x 768  new_crtc_htotal = 0x1024;
+        * 1280 x 1024 new_crtc_htotal = 0x0c34;
+        */
+       new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+
+       DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
+       return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+}
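
A quick sanity check of this formula against the case the code cites, assuming standard VESA 1024x768@60 timings:

	/* With crtc_htotal = 1344 and clock = 65000 kHz:
	 *
	 *   (1344 - 1) * 200 * 1000 / 65000 = 4132 = 0x1024
	 *
	 * which reproduces the "new_crtc_htotal = 0x1024" note above.
	 */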
+
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+                               int refclk, struct oaktrail_hdmi_clock *best_clock)
+{
+       int np_min, np_max, nr_min, nr_max;
+       int np, nr, nf;
+
+       np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+       np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+       if (np_min < oaktrail_hdmi_limit.np.min)
+               np_min = oaktrail_hdmi_limit.np.min;
+       if (np_max > oaktrail_hdmi_limit.np.max)
+               np_max = oaktrail_hdmi_limit.np.max;
+
+       nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+       nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+       if (nr_min < oaktrail_hdmi_limit.nr.min)
+               nr_min = oaktrail_hdmi_limit.nr.min;
+       if (nr_max > oaktrail_hdmi_limit.nr.max)
+               nr_max = oaktrail_hdmi_limit.nr.max;
+
+       np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+       nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+       nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+       DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+
+       /*
+        * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+        * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+        */
+       best_clock->np = np;
+       best_clock->nr = nr - 1;
+       best_clock->nf = (nf << 14);
+}
+
+static void scu_busy_loop(void __iomem *scu_base)
+{
+       u32 status = 0;
+       u32 loop_count = 0;
+
+       status = readl(scu_base + 0x04);
+       while (status & 1) {
+               udelay(1); /* SCU processing time is a few microseconds */
+               status = readl(scu_base + 0x04);
+               loop_count++;
+               /* bail out if the SCU doesn't clear the busy bit after many retries */
+               if (loop_count > 1000) {
+                       DRM_DEBUG_KMS("SCU IPC timed out");
+                       return;
+               }
+       }
+}
+
+/*
+ *     You don't want to know, you really really don't want to know....
+ *
+ *     This is magic. However it's safe magic because of the way the platform
+ *     works and it is necessary magic.
+ */
+static void oaktrail_hdmi_reset(struct drm_device *dev)
+{
+       void __iomem *base;
+       unsigned long scu_ipc_mmio = 0xff11c000UL;
+       int scu_len = 1024;
+
+       base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+       if (base == NULL) {
+               DRM_ERROR("failed to map scu mmio\n");
+               return;
+       }
+
+       /* scu ipc: assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffdf, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       /* scu ipc: de-assert hdmi controller reset */
+       writel(0xff11d118, base + 0x0c);
+       writel(0x7fffffff, base + 0x80);
+       writel(0x42005, base + 0x0);
+       scu_busy_loop(base);
+
+       iounmap(base);
+}
+
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+                           struct drm_display_mode *mode,
+                           struct drm_display_mode *adjusted_mode,
+                           int x, int y,
+                           struct drm_framebuffer *old_fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_psb_private *dev_priv = dev->dev_private;
+       struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+       int pipe = 1;
+       int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+       int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+       int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+       int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+       int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+       int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+       int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+       int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+       int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+       int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+       int refclk;
+       struct oaktrail_hdmi_clock clock;
+       u32 dspcntr, pipeconf, dpll, temp;
+       int dspcntr_reg = DSPBCNTR;
+
+       if (!gma_power_begin(dev, true))
+               return 0;
+
+       /* Disable the VGA plane that we never use */
+       REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+       /* Disable dpll if necessary */
+       dpll = REG_READ(DPLL_CTRL);
+       if ((dpll & DPLL_PWRDN) == 0) {
+               REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+               REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+               REG_WRITE(DPLL_STATUS, 0x1);
+       }
+       udelay(150);
+
+       /* Reset controller */
+       oaktrail_hdmi_reset(dev);
+
+       /* program and enable dpll */
+       refclk = 25000;
+       oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+
+       /* Set the DPLL */
+       dpll = REG_READ(DPLL_CTRL);
+       dpll &= ~DPLL_PDIV_MASK;
+       dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+       REG_WRITE(DPLL_CTRL, 0x00000008);
+       REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+       REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+       REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+       REG_WRITE(DPLL_UPDATE, 0x80000000);
+       REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+       udelay(150);
+
+       /* configure HDMI */
+       HDMI_WRITE(0x1004, 0x1fd);
+       HDMI_WRITE(0x2000, 0x1);
+       HDMI_WRITE(0x2008, 0x0);
+       HDMI_WRITE(0x3130, 0x8);
+       HDMI_WRITE(0x101c, 0x1800810);
+
+       temp = htotal_calculate(adjusted_mode);
+       REG_WRITE(htot_reg, temp);
+       REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+       REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+       REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+       REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+
+       temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+       HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+
+       REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+       REG_WRITE(dsppos_reg, 0);
+
+       /* Flush the plane changes */
+       {
+               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+       }
+
+       /* Set up the display plane register */
+       dspcntr = REG_READ(dspcntr_reg);
+       dspcntr |= DISPPLANE_GAMMA_ENABLE;
+       dspcntr |= DISPPLANE_SEL_PIPE_B;
+       dspcntr |= DISPLAY_PLANE_ENABLE;
+
+       /* setup pipeconf */
+       pipeconf = REG_READ(pipeconf_reg);
+       pipeconf |= PIPEACONF_ENABLE;
+
+       REG_WRITE(pipeconf_reg, pipeconf);
+       REG_READ(pipeconf_reg);
+
+       REG_WRITE(PCH_PIPEBCONF, pipeconf);
+       REG_READ(PCH_PIPEBCONF);
+       wait_for_vblank(dev);
+
+       REG_WRITE(dspcntr_reg, dspcntr);
+       wait_for_vblank(dev);
+
+       gma_power_end(dev);
+
+       return 0;
+}
+
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+{
+       struct drm_device *dev = crtc->dev;
+       u32 temp;
+
+       DRM_DEBUG_KMS("%s %d\n", __func__, mode);
+
+       switch (mode) {
+       case DRM_MODE_DPMS_OFF:
+               REG_WRITE(VGACNTRL, 0x80000000);
+
+               /* Disable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+                       REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+                       REG_READ(DSPBCNTR);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+
+               /* Disable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Disable LNW Pipes, etc */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) != 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+
+               /* wait for pipe off */
+               udelay(150);
+
+               /* Disable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) == 0) {
+                       REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+                       REG_WRITE(DPLL_STATUS, 0x1);
+               }
+
+               /* wait for dpll off */
+               udelay(150);
+
+               break;
+       case DRM_MODE_DPMS_ON:
+       case DRM_MODE_DPMS_STANDBY:
+       case DRM_MODE_DPMS_SUSPEND:
+               /* Enable dpll */
+               temp = REG_READ(DPLL_CTRL);
+               if ((temp & DPLL_PWRDN) != 0) {
+                       REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+                       temp = REG_READ(DPLL_CLK_ENABLE);
+                       REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+                       REG_READ(DPLL_CLK_ENABLE);
+               }
+               /* wait for dpll warm up */
+               udelay(150);
+
+               /* Enable pipe B */
+               temp = REG_READ(PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PIPEBCONF);
+               }
+
+               /* Enable LNW Pipe B */
+               temp = REG_READ(PCH_PIPEBCONF);
+               if ((temp & PIPEACONF_ENABLE) == 0) {
+                       REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+                       REG_READ(PCH_PIPEBCONF);
+               }
+
+               wait_for_vblank(dev);
+
+               /* Enable plane */
+               temp = REG_READ(DSPBCNTR);
+               if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+                       REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+                       /* Flush the plane changes */
+                       REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+                       REG_READ(DSPBSURF);
+               }
+
+               psb_intel_crtc_load_lut(crtc);
+       }
+
+       /* DSPARB */
+       REG_WRITE(DSPARB, 0x00003fbf);
+
+       /* FW1 */
+       REG_WRITE(0x70034, 0x3f880a0a);
+
+       /* FW2 */
+       REG_WRITE(0x70038, 0x0b060808);
+
+       /* FW4 */
+       REG_WRITE(0x70050, 0x08030404);
+
+       /* FW5 */
+       REG_WRITE(0x70054, 0x04040404);
+
+       /* LNC Chicken Bits - Squawk! */
+       REG_WRITE(0x70400, 0x4000);
+
+       return;
+}
+
 static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
 {
        static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
 
 static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_psb_private *dev_priv = dev->dev_private;
        struct i2c_adapter *i2c_adap;
        struct edid *edid;
-       struct drm_display_mode *mode, *t;
-       int i = 0, ret = 0;
+       int ret = 0;
 
+       /*
+        *      FIXME: We need to figure this lot out. In theory we can
+        *      read the EDID somehow but I've yet to find working reference
+        *      code.
+        */
        i2c_adap = i2c_get_adapter(3);
        if (i2c_adap == NULL) {
                DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
                drm_mode_connector_update_edid_property(connector, edid);
                ret = drm_add_edid_modes(connector, edid);
        }
-
-       /*
-        * prune modes that require frame buffer bigger than stolen mem
-        */
-       list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
-               if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
-                       i++;
-                       drm_mode_remove(connector, mode);
-               }
-       }
-       return ret - i;
+       return ret;
 }
 
 static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
        drm_sysfs_connector_add(connector);
+       dev_info(dev->dev, "HDMI initialised.\n");
 
        return;
 
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
 
        dev_priv->hdmi_priv = hdmi_dev;
        oaktrail_hdmi_audio_disable(dev);
+
+       dev_info(dev->dev, "HDMI hardware present.\n");
+
        return;
 
 free:
index 558c77f..325013a 100644 (file)
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
                return;
        }
 
-       drm_connector_property_get_value(
-               connector,
+       drm_object_property_get_value(
+               &connector->base,
                dev->mode_config.scaling_mode_property,
                &v);
 
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
        connector->interlace_allowed = false;
        connector->doublescan_allowed = false;
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        dev->mode_config.scaling_mode_property,
                                        DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        dev_priv->backlight_property,
                                        BRIGHTNESS_MAX_LEVEL);
 
index 2a4c3a9..9fa5fa2 100644 (file)
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                        goto set_prop_error;
                }
 
-               if (drm_connector_property_get_value(connector,
+               if (drm_object_property_get_value(&connector->base,
                                                     property,
                                                     &curval))
                        goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                if (curval == value)
                        goto set_prop_done;
 
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
                                goto set_prop_error;
                }
        } else if (!strcmp(property->name, "backlight")) {
-               if (drm_connector_property_set_value(connector,
+               if (drm_object_property_set_value(&connector->base,
                                                        property,
                                                        value))
                        goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
        connector->doublescan_allowed = false;
 
        /*Attach connector properties*/
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_FULLSCREEN);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev_priv->backlight_property,
                                      BRIGHTNESS_MAX_LEVEL);
 
index fc92927..a4cc777 100644 (file)
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
        uint8_t cmd;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
        } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
                temp_value = val;
                if (psb_intel_sdvo_connector->left == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->right, val);
                        if (psb_intel_sdvo_connector->left_margin == temp_value)
                                return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->right == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->left, val);
                        if (psb_intel_sdvo_connector->right_margin == temp_value)
                                return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->top == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->bottom, val);
                        if (psb_intel_sdvo_connector->top_margin == temp_value)
                                return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_V;
                        goto set_value;
                } else if (psb_intel_sdvo_connector->bottom == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         psb_intel_sdvo_connector->top, val);
                        if (psb_intel_sdvo_connector->bottom_margin == temp_value)
                                return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
                                i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
 
        psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
-       drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+       drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
                                      psb_intel_sdvo_connector->tv_format, 0);
        return true;
 
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
                psb_intel_sdvo_connector->name = \
                        drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
                if (!psb_intel_sdvo_connector->name) return false; \
-               drm_connector_attach_property(connector, \
+               drm_object_attach_property(&connector->base, \
                                              psb_intel_sdvo_connector->name, \
                                              psb_intel_sdvo_connector->cur_##name); \
                DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->left)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->left,
                                              psb_intel_sdvo_connector->left_margin);
 
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->right)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->right,
                                              psb_intel_sdvo_connector->right_margin);
                DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->top)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->top,
                                              psb_intel_sdvo_connector->top_margin);
 
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->bottom)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->bottom,
                                              psb_intel_sdvo_connector->bottom_margin);
                DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
                if (!psb_intel_sdvo_connector->dot_crawl)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              psb_intel_sdvo_connector->dot_crawl,
                                              psb_intel_sdvo_connector->cur_dot_crawl);
                DRM_DEBUG_KMS("dot crawl: current %d\n", response);
index 599099f..b865d07 100644 (file)
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
        else
                priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
 
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                        encoder->dev->mode_config.tv_subconnector_property,
                                                        priv->subconnector);
 
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
 
        priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
 
-       drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+       drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
                                      priv->select_subconnector);
-       drm_connector_attach_property(connector, conf->tv_subconnector_property,
+       drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
                                      priv->subconnector);
-       drm_connector_attach_property(connector, conf->tv_left_margin_property,
+       drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
                                      priv->hmargin);
-       drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
+       drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
                                      priv->vmargin);
-       drm_connector_attach_property(connector, conf->tv_mode_property,
+       drm_object_attach_property(&connector->base, conf->tv_mode_property,
                                      priv->norm);
-       drm_connector_attach_property(connector, conf->tv_brightness_property,
+       drm_object_attach_property(&connector->base, conf->tv_brightness_property,
                                      priv->brightness);
-       drm_connector_attach_property(connector, conf->tv_contrast_property,
+       drm_object_attach_property(&connector->base, conf->tv_contrast_property,
                                      priv->contrast);
-       drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
+       drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
                                      priv->flicker);
-       drm_connector_attach_property(connector, priv->scale_property,
+       drm_object_attach_property(&connector->base, priv->scale_property,
                                      priv->scale);
 
        return 0;
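
A note on the preceding hunks: the conversion from drm_connector_attach_property()/drm_connector_property_set_value() to the drm_object_*() helpers is mechanical. The generic helpers operate on any struct drm_mode_object, so callers now pass the connector's embedded base object. A minimal sketch of the new call pattern (the two wrapper functions are hypothetical; the drm_object_*() calls and their arguments come straight from the hunks):

    /* Hypothetical wrappers illustrating the post-conversion pattern:
     * the generic property API takes the embedded drm_mode_object. */
    static void sketch_attach(struct drm_connector *connector,
                              struct drm_property *prop, uint64_t init_val)
    {
            drm_object_attach_property(&connector->base, prop, init_val);
    }

    static int sketch_set(struct drm_connector *connector,
                          struct drm_property *prop, uint64_t value)
    {
            /* Returns 0 on success, as the old connector variant did. */
            return drm_object_property_set_value(&connector->base, prop, value);
    }
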
index dde8b50..e6a11ca 100644 (file)
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
-                       if (!work->pending) {
+                       if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
-                       seq_printf(m, "%d prepares\n", work->pending);
+                       seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
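
In the pageflip hunk above, work->pending changes from a plain int to an atomic_t holding a small progress state, so the debugfs reader can sample it without taking the driver's flip locks; the comparison against INTEL_FLIP_COMPLETE asks whether the flip has reached its terminal state. A sketch of the idea, with illustrative state values (the real INTEL_FLIP_* constants live in the driver headers):

    /* Illustrative flip state machine published through an atomic_t so
     * lock-free readers always see a whole, untorn value. */
    #include <linux/atomic.h>

    enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };  /* assumed values */

    struct flip_work {
            atomic_t pending;
    };

    static bool flip_is_done(struct flip_work *work)
    {
            return atomic_read(&work->pending) >= FLIP_COMPLETE;
    }
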
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
        if (INTEL_INFO(dev)->gen >= 6) {
                seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
                seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-               seq_printf(m, "  SYNC_0: 0x%08x\n",
-                          error->semaphore_mboxes[ring][0]);
-               seq_printf(m, "  SYNC_1: 0x%08x\n",
-                          error->semaphore_mboxes[ring][1]);
+               seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][0],
+                          error->semaphore_seqno[ring][0]);
+               seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][1],
+                          error->semaphore_seqno[ring][1]);
        }
        seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
        seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 rpmodectl1, gt_core_status, rcctl1;
+       u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count=0, ret;
 
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
        rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
        rcctl1 = I915_READ(GEN6_RC_CONTROL);
        mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->rps.hw_lock);
+       sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        seq_printf(m, "Video Turbo Mode: %s\n",
                   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
        seq_printf(m, "RC6++ residency since boot: %u\n",
                   I915_READ(GEN6_GT_GFX_RC6pp));
 
+       seq_printf(m, "RC6   voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
+       seq_printf(m, "RC6+  voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
+       seq_printf(m, "RC6++ voltage: %dmV\n",
+                  GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
        return 0;
 }
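
The new RC6 voltage lines unpack three 8-bit VID codes (RC6, RC6+, RC6++) from the single 32-bit word that sandybridge_pcode_read() returns, then convert each to millivolts with GEN6_DECODE_RC6_VID(). A sketch of the unpacking; the linear VID-to-mV decode used here is an assumption standing in for the real macro:

    /* Split a packed rc6vids word into its three 8-bit VID fields and
     * decode each to mV.  The "vid * 5 + 245" conversion is assumed. */
    static void decode_rc6_vids(u32 rc6vids, int mv[3])
    {
            int i;

            for (i = 0; i < 3; i++) {
                    u8 vid = (rc6vids >> (8 * i)) & 0xff; /* RC6, RC6+, RC6++ */
                    mv[i] = vid ? vid * 5 + 245 : 0;
            }
    }
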
 
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                return 0;
        }
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
 
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
             gpu_freq++) {
-               I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
-               I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-                          GEN6_PCODE_READ_MIN_FREQ_TABLE);
-               if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-                             GEN6_PCODE_READY) == 0, 10)) {
-                       DRM_ERROR("pcode read of freq table timed out\n");
-                       continue;
-               }
-               ia_freq = I915_READ(GEN6_PCODE_DATA);
+               ia_freq = gpu_freq;
+               sandybridge_pcode_read(dev_priv,
+                                      GEN6_PCODE_READ_MIN_FREQ_TABLE,
+                                      &ia_freq);
                seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
        }
 
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return 0;
 }
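
Here and in the max/min_freq files below, the debugfs code drops struct_mutex in favour of the new dev_priv->rps.hw_lock, which (per the comment added to intel_gen6_power_mgmt further down) protects RPS/RC6 register access and PCU communication. The access pattern, sketched:

    /* Sketch: RPS bookkeeping only needs rps.hw_lock now.  Taking it
     * interruptibly lets a signal abort a blocked debugfs access. */
    static int read_cur_delay(struct drm_i915_private *dev_priv, u8 *out)
    {
            int ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);

            if (ret)
                    return ret;
            *out = dev_priv->rps.cur_delay;
            mutex_unlock(&dev_priv->rps.hw_lock);
            return 0;
    }
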
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       if (dev_priv->pwrctx) {
+       if (dev_priv->ips.pwrctx) {
                seq_printf(m, "power context ");
-               describe_obj(m, dev_priv->pwrctx);
+               describe_obj(m, dev_priv->ips.pwrctx);
                seq_printf(m, "\n");
        }
 
-       if (dev_priv->renderctx) {
+       if (dev_priv->ips.renderctx) {
                seq_printf(m, "render context ");
-               describe_obj(m, dev_priv->renderctx);
+               describe_obj(m, dev_priv->ips.renderctx);
                seq_printf(m, "\n");
        }
 
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
 
        len = snprintf(buf, sizeof(buf),
                       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
 
        DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
 
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
        dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
 
        gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return cnt;
 }
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
 
        len = snprintf(buf, sizeof(buf),
                       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
        DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
 
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
        dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
 
        gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return cnt;
 }
index 61ae104..8f63cd5 100644 (file)
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
 }
 
 /**
- * Sets up the hardware status page for devices that need a physical address
- * in the register.
- */
-static int i915_init_phys_hws(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-
-       /* Program Hardware Status Page */
-       dev_priv->status_page_dmah =
-               drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
-
-       if (!dev_priv->status_page_dmah) {
-               DRM_ERROR("Can not allocate hardware status page\n");
-               return -ENOMEM;
-       }
-
-       memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
-                 0, PAGE_SIZE);
-
-       i915_write_hws_pga(dev);
-
-       DRM_DEBUG_DRIVER("Enabled hardware status page\n");
-       return 0;
-}
-
-/**
  * Frees the hardware status page, whether it's a physical address or a virtual
  * address set up by the X Server.
  */
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 
        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-       ring->space = ring->head - (ring->tail + 8);
+       ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
        if (ring->space < 0)
                ring->space += ring->size;
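
The only change above is the headroom constant: free space is still computed with the usual circular-buffer arithmetic, but the reservation grows from a hard-coded 8 bytes to I915_RING_FREE_SPACE so head can never fully catch tail. The arithmetic in isolation:

    /* Free bytes in a circular ring of `size` bytes, keeping `reserve`
     * bytes of headroom so a full ring is distinguishable from an empty
     * one.  A negative intermediate result means tail has wrapped. */
    static int ring_free_space(int head, int tail, int size, int reserve)
    {
            int space = head - (tail + reserve);

            if (space < 0)
                    space += size;
            return space;
    }
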
 
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
-       dev_priv->counter++;
-       if (dev_priv->counter > 0x7FFFFFFFUL)
-               dev_priv->counter = 0;
+       dev_priv->dri1.counter++;
+       if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+               dev_priv->dri1.counter = 0;
        if (master_priv->sarea_priv)
-               master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+               master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->counter);
+               OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
        ADVANCE_LP_RING();
 
-       master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+       master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
 
        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->counter);
+               OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 static int i915_quiescent(struct drm_device *dev)
 {
-       struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
        i915_kernel_lost_context(dev);
-       return intel_wait_ring_idle(ring);
+       return intel_ring_idle(LP_RING(dev->dev_private));
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
 
        DRM_DEBUG_DRIVER("\n");
 
-       dev_priv->counter++;
-       if (dev_priv->counter > 0x7FFFFFFFUL)
-               dev_priv->counter = 1;
+       dev_priv->dri1.counter++;
+       if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
+               dev_priv->dri1.counter = 1;
        if (master_priv->sarea_priv)
-               master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+               master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-               OUT_RING(dev_priv->counter);
+               OUT_RING(dev_priv->dri1.counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
 
-       return dev_priv->counter;
+       return dev_priv->dri1.counter;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 
        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
-                         READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+                         READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
        }
 
        return ret;
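
These DRI1 hunks only move the breadcrumb counter under the new dev_priv->dri1 grouping (introduced in the i915_drv.h hunk further down). The counter is deliberately confined to the positive s32 range so it round-trips through the signed sarea fields; one path restarts it at 0, the other at 1. The wrap logic, sketched:

    /* Sketch: advance a breadcrumb counter while keeping it within
     * [1, 0x7FFFFFFF], so it stays positive as a signed 32-bit value. */
    static u32 next_breadcrumb(u32 *counter)
    {
            if (++(*counter) > 0x7FFFFFFFUL)
                    *counter = 1;
            return *counter;
    }
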
@@ -1014,6 +986,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
+       case I915_PARAM_HAS_SECURE_BATCHES:
+               value = capable(CAP_SYS_ADMIN);
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
@@ -1068,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
-       struct intel_ring_buffer *ring = LP_RING(dev_priv);
+       struct intel_ring_buffer *ring;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;
@@ -1088,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
+       ring = LP_RING(dev_priv);
        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
        dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1302,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        intel_modeset_gem_init(dev);
 
+       INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
+
        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem;
@@ -1491,19 +1469,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto free_priv;
        }
 
-       ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
-       if (!ret) {
-               DRM_ERROR("failed to set up gmch\n");
-               ret = -EIO;
+       ret = i915_gem_gtt_init(dev);
+       if (ret)
                goto put_bridge;
-       }
-
-       dev_priv->mm.gtt = intel_gtt_get();
-       if (!dev_priv->mm.gtt) {
-               DRM_ERROR("Failed to initialize GTT\n");
-               ret = -ENODEV;
-               goto put_gmch;
-       }
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1558,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);
 
-       /* Make sure the bios did its job and set up vital registers */
        intel_setup_bios(dev);
 
        i915_gem_load(dev);
 
-       /* Init HWS */
-       if (!I915_NEED_GFX_HWS(dev)) {
-               ret = i915_init_phys_hws(dev);
-               if (ret)
-                       goto out_gem_unload;
-       }
-
        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
@@ -1621,6 +1581,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        spin_lock_init(&dev_priv->rps.lock);
        spin_lock_init(&dev_priv->dpio_lock);
 
+       mutex_init(&dev_priv->rps.hw_lock);
+
        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
                dev_priv->num_pipe = 3;
        else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1640,7 @@ out_mtrrfree:
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-       intel_gmch_remove();
+       i915_gem_gtt_fini(dev);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1721,6 +1683,7 @@ int i915_driver_unload(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);
+               cancel_work_sync(&dev_priv->console_resume_work);
 
                /*
                 * free the memory space allocated for the child device
index 6770ee6..530db83 100644 (file)
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
 unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 MODULE_PARM_DESC(panel_ignore_lid,
-               "Override lid status (0=autodetect [default], 1=lid open, "
-               "-1=lid closed)");
+               "Override lid status (0=autodetect, 1=autodetect disabled [default], "
+               "-1=force lid closed, -2=force lid open)");
 
 unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = {          /* aka */
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-#define INTEL_PCH_DEVICE_ID_MASK       0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE   0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE   0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE   0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE   0x8c00
-
 void intel_detect_pch(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
        pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
        if (pch) {
                if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                       int id;
+                       unsigned short id;
                        id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                       dev_priv->pch_id = id;
 
                        if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_IBX;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+                               WARN_ON(!IS_GEN5(dev));
                        } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_CPT;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
                                dev_priv->num_pch_pll = 2;
                                DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                dev_priv->num_pch_pll = 0;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+                               WARN_ON(!IS_HASWELL(dev));
+                       } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_LPT;
+                               dev_priv->num_pch_pll = 0;
+                               DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev));
                        }
                        BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
                }
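
PCH detection keys off the ISA-bridge PCI device ID with the low byte (the revision/stepping) masked away; this patch additionally caches the full id in dev_priv->pch_id and cross-checks each PCH against the expected CPU generation with WARN_ON. The classification, condensed into a sketch (PCH_NONE as the no-match value is an assumption about the enum):

    /* Sketch: classify a PCH from its ISA-bridge device ID.  Only the
     * top byte identifies the family; PPT reuses the CPT code path. */
    static enum intel_pch classify_pch(unsigned short device)
    {
            switch (device & INTEL_PCH_DEVICE_ID_MASK) {       /* 0xff00 */
            case INTEL_PCH_IBX_DEVICE_ID_TYPE:
                    return PCH_IBX;
            case INTEL_PCH_CPT_DEVICE_ID_TYPE:
            case INTEL_PCH_PPT_DEVICE_ID_TYPE:
                    return PCH_CPT;
            case INTEL_PCH_LPT_DEVICE_ID_TYPE:
            case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
                    return PCH_LPT;
            default:
                    return PCH_NONE;
            }
    }
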
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
                        return error;
                }
 
+               cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
                intel_modeset_disable(dev);
 
                drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
        return 0;
 }
 
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            console_resume_work);
+       struct drm_device *dev = dev_priv->dev;
+
+       console_lock();
+       intel_fbdev_set_suspend(dev, 0);
+       console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               mutex_lock(&dev->struct_mutex);
-               i915_gem_restore_gtt_mappings(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
        i915_restore_state(dev);
        intel_opregion_setup(dev);
 
        /* KMS EnterVT equivalent */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-                       ironlake_init_pch_refclk(dev);
+               intel_init_pch_refclk(dev);
 
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
 
                intel_modeset_init_hw(dev);
-               intel_modeset_setup_hw_state(dev);
-               drm_mode_config_reset(dev);
+               intel_modeset_setup_hw_state(dev, false);
                drm_irq_install(dev);
        }
 
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
 
        dev_priv->modeset_on_lid = 0;
 
-       console_lock();
-       intel_fbdev_set_suspend(dev, 0);
-       console_unlock();
+       /*
+        * The console lock can be pretty contended on resume due
+        * to all the printk activity.  Try to keep it out of the hot
+        * path of resume if possible.
+        */
+       if (console_trylock()) {
+               intel_fbdev_set_suspend(dev, 0);
+               console_unlock();
+       } else {
+               schedule_work(&dev_priv->console_resume_work);
+       }
+
+       return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+       int error = 0;
+
+       intel_gt_reset(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_restore_gtt_mappings(dev);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       __i915_drm_thaw(dev);
+
        return error;
 }
 
 int i915_resume(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
 
        pci_set_master(dev->pdev);
 
-       ret = i915_drm_thaw(dev);
+       intel_gt_reset(dev);
+
+       /*
+        * Platforms with opregion should have a sane BIOS; older ones (gen3
+        * and earlier) need this since the BIOS might clear all our scratch PTEs.
+        */
+       if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+           !dev_priv->opregion.header) {
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_restore_gtt_mappings(dev);
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       ret = __i915_drm_thaw(dev);
        if (ret)
                return ret;
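
The thaw path now avoids sleeping on the console lock: console_trylock() is attempted on the hot path, and on contention the fbdev unsuspend is punted to console_resume_work, whose handler intel_console_resume() can afford the blocking console_lock(). The pattern in isolation:

    /* Sketch: opportunistic locking on the hot path, with a workqueue
     * fallback that is allowed to block on the contended lock. */
    static void resume_fbdev(struct drm_i915_private *dev_priv)
    {
            if (console_trylock()) {
                    intel_fbdev_set_suspend(dev_priv->dev, 0);
                    console_unlock();
            } else {
                    schedule_work(&dev_priv->console_resume_work);
            }
    }
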
 
@@ -833,7 +883,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;
 
-       if (intel_info->is_haswell || intel_info->is_valleyview)
+       if (intel_info->is_valleyview)
                if(!i915_preliminary_hw_support) {
                        DRM_ERROR("Preliminary hardware support disabled\n");
                        return -ENODEV;
@@ -1140,12 +1190,40 @@ static bool IS_DISPLAYREG(u32 reg)
        if (reg == GEN6_GDRST)
                return false;
 
+       switch (reg) {
+       case _3D_CHICKEN3:
+       case IVB_CHICKEN3:
+       case GEN7_COMMON_SLICE_CHICKEN1:
+       case GEN7_L3CNTLREG1:
+       case GEN7_L3_CHICKEN_MODE_REGISTER:
+       case GEN7_ROW_CHICKEN2:
+       case GEN7_L3SQCREG4:
+       case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+       case GEN7_HALF_SLICE_CHICKEN1:
+       case GEN6_MBCTL:
+       case GEN6_UCGCTL2:
+               return false;
+       default:
+               break;
+       }
+
        return true;
 }
 
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+       /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+        * chip from rc6 before touching it for real. MI_MODE is masked, hence
+        * harmless to write 0 into. */
+       I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
+       if (IS_GEN5(dev_priv->dev)) \
+               ilk_dummy_write(dev_priv); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                unsigned long irqflags; \
                spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1255,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
+       if (IS_GEN5(dev_priv->dev)) \
+               ilk_dummy_write(dev_priv); \
+       if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+               DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+               I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+       } \
        if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
                write##y(val, dev_priv->regs + reg + 0x180000);         \
        } else {                                                        \
index f511fa2..557843d 100644 (file)
@@ -58,6 +58,14 @@ enum pipe {
 };
 #define pipe_name(p) ((p) + 'A')
 
+enum transcoder {
+       TRANSCODER_A = 0,
+       TRANSCODER_B,
+       TRANSCODER_C,
+       TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
 enum plane {
        PLANE_A = 0,
        PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+struct intel_ddi_plls {
+       int spll_refcount;
+       int wrpll1_refcount;
+       int wrpll2_refcount;
+};
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
        struct drm_i915_gem_object *cur_obj;
 };
 
-struct mem_block {
-       struct mem_block *next;
-       struct mem_block *prev;
-       int start;
-       int size;
-       struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
        u32 instdone[I915_NUM_RINGS];
        u32 acthd[I915_NUM_RINGS];
        u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+       u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
        u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
        /* our own tracking of ring head and tail */
        u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
                                 uint32_t sprite_width, int pixel_size);
        void (*update_linetime_wm)(struct drm_device *dev, int pipe,
                                 struct drm_display_mode *mode);
+       void (*modeset_global_resources)(struct drm_device *dev);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@ struct drm_i915_display_funcs {
                          struct drm_crtc *crtc);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
-       void (*init_pch_clock_gating)(struct drm_device *dev);
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
+       struct drm_device *dev;
        unsigned num_pd_entries;
        struct page **pt_pages;
        uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
        PCH_LPT,        /* Lynxpoint PCH */
 };
 
+enum intel_sbi_destination {
+       SBI_ICLK,
+       SBI_MPHY,
+};
+
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
 
 struct intel_gmbus {
        struct i2c_adapter adapter;
-       bool force_bit;
+       u32 force_bit;
        u32 reg0;
        u32 gpio_reg;
        struct i2c_algo_bit_data bit_algo;
        struct drm_i915_private *dev_priv;
 };
 
-typedef struct drm_i915_private {
-       struct drm_device *dev;
-
-       const struct intel_device_info *info;
-
-       int relative_constants_mode;
-
-       void __iomem *regs;
-
-       struct drm_i915_gt_funcs gt;
-       /** gt_fifo_count and the subsequent register write are synchronized
-        * with dev->struct_mutex. */
-       unsigned gt_fifo_count;
-       /** forcewake_count is protected by gt_lock */
-       unsigned forcewake_count;
-       /** gt_lock is also taken in irq contexts. */
-       struct spinlock gt_lock;
-
-       struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
-       /** gmbus_mutex protects against concurrent usage of the single hw gmbus
-        * controller on different i2c buses. */
-       struct mutex gmbus_mutex;
-
-       /**
-        * Base address of the gmbus and gpio block.
-        */
-       uint32_t gpio_mmio_base;
-
-       struct pci_dev *bridge_dev;
-       struct intel_ring_buffer ring[I915_NUM_RINGS];
-       uint32_t next_seqno;
-
-       drm_dma_handle_t *status_page_dmah;
-       uint32_t counter;
-       struct drm_i915_gem_object *pwrctx;
-       struct drm_i915_gem_object *renderctx;
-
-       struct resource mch_res;
-
-       atomic_t irq_received;
-
-       /* protects the irq masks */
-       spinlock_t irq_lock;
-
-       /* DPIO indirect register protection */
-       spinlock_t dpio_lock;
-
-       /** Cached value of IMR to avoid reads in updating the bitfield */
-       u32 pipestat[2];
-       u32 irq_mask;
-       u32 gt_irq_mask;
-       u32 pch_irq_mask;
-
-       u32 hotplug_supported_mask;
-       struct work_struct hotplug_work;
-
-       int num_pipe;
-       int num_pch_pll;
-
-       /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-       struct timer_list hangcheck_timer;
-       int hangcheck_count;
-       uint32_t last_acthd[I915_NUM_RINGS];
-       uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-       unsigned int stop_rings;
-
-       unsigned long cfb_size;
-       unsigned int cfb_fb;
-       enum plane cfb_plane;
-       int cfb_y;
-       struct intel_fbc_work *fbc_work;
-
-       struct intel_opregion opregion;
-
-       /* overlay */
-       struct intel_overlay *overlay;
-       bool sprite_scaling_enabled;
-
-       /* LVDS info */
-       int backlight_level;  /* restore backlight to this value */
-       bool backlight_enabled;
-       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
-       /* Feature bits from the VBIOS */
-       unsigned int int_tv_support:1;
-       unsigned int lvds_dither:1;
-       unsigned int lvds_vbt:1;
-       unsigned int int_crt_support:1;
-       unsigned int lvds_use_ssc:1;
-       unsigned int display_clock_mode:1;
-       int lvds_ssc_freq;
-       unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-       unsigned int lvds_val; /* used for checking LVDS channel mode */
-       struct {
-               int rate;
-               int lanes;
-               int preemphasis;
-               int vswing;
-
-               bool initialized;
-               bool support;
-               int bpp;
-               struct edp_power_seq pps;
-       } edp;
-       bool no_aux_handshake;
-
-       struct notifier_block lid_notifier;
-
-       int crt_ddc_pin;
-       struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-       int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
-       int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
-       unsigned int fsb_freq, mem_freq, is_ddr3;
-
-       spinlock_t error_lock;
-       /* Protected by dev->error_lock. */
-       struct drm_i915_error_state *first_error;
-       struct work_struct error_work;
-       struct completion error_completion;
-       struct workqueue_struct *wq;
-
-       /* Display functions */
-       struct drm_i915_display_funcs display;
-
-       /* PCH chipset type */
-       enum intel_pch pch_type;
-
-       unsigned long quirks;
-
-       /* Register state */
-       bool modeset_on_lid;
+struct i915_suspend_saved_registers {
        u8 saveLBB;
        u32 saveDSPACNTR;
        u32 saveDSPBCNTR;
        u32 saveDSPARB;
-       u32 saveHWS;
        u32 savePIPEACONF;
        u32 savePIPEBCONF;
        u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
        u32 savePIPEB_LINK_N1;
        u32 saveMCHBAR_RENDER_STANDBY;
        u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+       struct work_struct work;
+       u32 pm_iir;
+       /* lock - irqsave spinlock that protects the work_struct and
+        * pm_iir. */
+       spinlock_t lock;
+
+       /* The below variables and all the rps hw state are protected by
+        * dev->struct_mutex. */
+       u8 cur_delay;
+       u8 min_delay;
+       u8 max_delay;
+
+       struct delayed_work delayed_resume_work;
+
+       /*
+        * Protects RPS/RC6 register access and PCU communication.
+        * Must be taken after struct_mutex if nested.
+        */
+       struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+       u8 cur_delay;
+       u8 min_delay;
+       u8 max_delay;
+       u8 fmax;
+       u8 fstart;
+
+       u64 last_count1;
+       unsigned long last_time1;
+       unsigned long chipset_power;
+       u64 last_count2;
+       struct timespec last_time2;
+       unsigned long gfx_power;
+       u8 corr;
+
+       int c_m;
+       int r_t;
+
+       struct drm_i915_gem_object *pwrctx;
+       struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+       unsigned allow_batchbuffer : 1;
+       u32 __iomem *gfx_hws_cpu_addr;
+
+       unsigned int cpp;
+       int back_offset;
+       int front_offset;
+       int current_page;
+       int page_flipping;
+
+       uint32_t counter;
+};
+
+struct intel_l3_parity {
+       u32 *remap_info;
+       struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+       struct drm_device *dev;
+
+       const struct intel_device_info *info;
+
+       int relative_constants_mode;
+
+       void __iomem *regs;
+
+       struct drm_i915_gt_funcs gt;
+       /** gt_fifo_count and the subsequent register write are synchronized
+        * with dev->struct_mutex. */
+       unsigned gt_fifo_count;
+       /** forcewake_count is protected by gt_lock */
+       unsigned forcewake_count;
+       /** gt_lock is also taken in irq contexts. */
+       struct spinlock gt_lock;
+
+       struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+       /** gmbus_mutex protects against concurrent usage of the single hw gmbus
+        * controller on different i2c buses. */
+       struct mutex gmbus_mutex;
+
+       /**
+        * Base address of the gmbus and gpio block.
+        */
+       uint32_t gpio_mmio_base;
+
+       struct pci_dev *bridge_dev;
+       struct intel_ring_buffer ring[I915_NUM_RINGS];
+       uint32_t next_seqno;
+
+       drm_dma_handle_t *status_page_dmah;
+       struct resource mch_res;
+
+       atomic_t irq_received;
+
+       /* protects the irq masks */
+       spinlock_t irq_lock;
+
+       /* DPIO indirect register protection */
+       spinlock_t dpio_lock;
+
+       /** Cached value of IMR to avoid reads in updating the bitfield */
+       u32 pipestat[2];
+       u32 irq_mask;
+       u32 gt_irq_mask;
+       u32 pch_irq_mask;
+
+       u32 hotplug_supported_mask;
+       struct work_struct hotplug_work;
+
+       int num_pipe;
+       int num_pch_pll;
+
+       /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+       struct timer_list hangcheck_timer;
+       int hangcheck_count;
+       uint32_t last_acthd[I915_NUM_RINGS];
+       uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+       unsigned int stop_rings;
+
+       unsigned long cfb_size;
+       unsigned int cfb_fb;
+       enum plane cfb_plane;
+       int cfb_y;
+       struct intel_fbc_work *fbc_work;
+
+       struct intel_opregion opregion;
+
+       /* overlay */
+       struct intel_overlay *overlay;
+       bool sprite_scaling_enabled;
+
+       /* LVDS info */
+       int backlight_level;  /* restore backlight to this value */
+       bool backlight_enabled;
+       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+       /* Feature bits from the VBIOS */
+       unsigned int int_tv_support:1;
+       unsigned int lvds_dither:1;
+       unsigned int lvds_vbt:1;
+       unsigned int int_crt_support:1;
+       unsigned int lvds_use_ssc:1;
+       unsigned int display_clock_mode:1;
+       int lvds_ssc_freq;
+       unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+       unsigned int lvds_val; /* used for checking LVDS channel mode */
+       struct {
+               int rate;
+               int lanes;
+               int preemphasis;
+               int vswing;
+
+               bool initialized;
+               bool support;
+               int bpp;
+               struct edp_power_seq pps;
+       } edp;
+       bool no_aux_handshake;
+
+       int crt_ddc_pin;
+       struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+       int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+       int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+       unsigned int fsb_freq, mem_freq, is_ddr3;
+
+       spinlock_t error_lock;
+       /* Protected by dev->error_lock. */
+       struct drm_i915_error_state *first_error;
+       struct work_struct error_work;
+       struct completion error_completion;
+       struct workqueue_struct *wq;
+
+       /* Display functions */
+       struct drm_i915_display_funcs display;
+
+       /* PCH chipset type */
+       enum intel_pch pch_type;
+       unsigned short pch_id;
+
+       unsigned long quirks;
+
+       /* Register state */
+       bool modeset_on_lid;
 
        struct {
                /** Bridge to intel-gtt-ko */
-               const struct intel_gtt *gtt;
+               struct intel_gtt *gtt;
                /** Memory allocator for GTT stolen memory */
                struct drm_mm stolen;
                /** Memory allocator for GTT */
@@ -706,8 +779,6 @@ typedef struct drm_i915_private {
                /** PPGTT used for aliasing the PPGTT with the GTT */
                struct i915_hw_ppgtt *aliasing_ppgtt;
 
-               u32 *l3_remap_info;
-
                struct shrinker inactive_shrinker;
 
                /**
@@ -785,19 +856,6 @@ typedef struct drm_i915_private {
                u32 object_count;
        } mm;
 
-       /* Old dri1 support infrastructure, beware the dragons ya fools entering
-        * here! */
-       struct {
-               unsigned allow_batchbuffer : 1;
-               u32 __iomem *gfx_hws_cpu_addr;
-
-               unsigned int cpp;
-               int back_offset;
-               int front_offset;
-               int current_page;
-               int page_flipping;
-       } dri1;
-
        /* Kernel Modesetting */
 
        struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +869,7 @@ typedef struct drm_i915_private {
        wait_queue_head_t pending_flip_queue;
 
        struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+       struct intel_ddi_plls ddi_plls;
 
        /* Reclocking support */
        bool render_reclock_avail;
@@ -820,46 +879,17 @@ typedef struct drm_i915_private {
        u16 orig_clock;
        int child_dev_num;
        struct child_device_config *child_dev;
-       struct drm_connector *int_lvds_connector;
-       struct drm_connector *int_edp_connector;
 
        bool mchbar_need_disable;
 
+       struct intel_l3_parity l3_parity;
+
        /* gen6+ rps state */
-       struct {
-               struct work_struct work;
-               u32 pm_iir;
-               /* lock - irqsave spinlock that protectects the work_struct and
-                * pm_iir. */
-               spinlock_t lock;
-
-               /* The below variables an all the rps hw state are protected by
-                * dev->struct mutext. */
-               u8 cur_delay;
-               u8 min_delay;
-               u8 max_delay;
-       } rps;
+       struct intel_gen6_power_mgmt rps;
 
        /* ilk-only ips/rps state. Everything in here is protected by the global
         * mchdev_lock in intel_pm.c */
-       struct {
-               u8 cur_delay;
-               u8 min_delay;
-               u8 max_delay;
-               u8 fmax;
-               u8 fstart;
-
-               u64 last_count1;
-               unsigned long last_time1;
-               unsigned long chipset_power;
-               u64 last_count2;
-               struct timespec last_time2;
-               unsigned long gfx_power;
-               u8 corr;
-
-               int c_m;
-               int r_t;
-       } ips;
+       struct intel_ilk_power_mgmt ips;
 
        enum no_fbc_reason no_fbc_reason;
 
@@ -871,14 +901,27 @@ typedef struct drm_i915_private {
        /* list of fbdev register on this device */
        struct intel_fbdev *fbdev;
 
+       /*
+        * The console lock may be contended at resume, but we don't
+        * want resume to block on it.
+        */
+       struct work_struct console_resume_work;
+
        struct backlight_device *backlight;
 
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
 
-       struct work_struct parity_error_work;
        bool hw_contexts_disabled;
        uint32_t hw_context_size;
+
+       bool fdi_rx_polarity_reversed;
+
+       struct i915_suspend_saved_registers regfile;
+
+       /* Old dri1 support infrastructure, beware the dragons ya fools entering
+        * here! */
+       struct i915_dri1_state dri1;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1120,9 +1163,14 @@ struct drm_i915_file_private {
 #define IS_IRONLAKE_D(dev)     ((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)      (INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev)                ((dev)->pci_device == 0x0156 || \
+                                (dev)->pci_device == 0x0152 || \
+                                (dev)->pci_device == 0x015a)
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev)            (IS_HASWELL(dev) && \
+                                ((dev)->pci_device & 0xFF00) == 0x0A00)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1168,6 +1216,13 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define INTEL_PCH_DEVICE_ID_MASK               0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE           0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE           0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE           0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
+
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1305,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
+extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1313,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
@@ -1368,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                                   struct intel_ring_buffer *ring,
-                                   u32 seqno);
+                                   struct intel_ring_buffer *ring);
 
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
@@ -1387,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1555,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
                              unsigned long start,
                              unsigned long mappable_end,
                              unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               intel_gtt_chipset_flush();
+}
+
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                        bool force_restore);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1693,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
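
Per the i915_drv.c hunk earlier, the reads declared by this template now expand to roughly the following (32-bit case only, control flow condensed; a reconstruction for illustration, not the literal macro body):

    /* Simplified sketch of the generated 32-bit read: wake Ironlake from
     * RC6 with a dummy write, then hold forcewake around the MMIO access
     * when the register range requires it. */
    static u32 sketch_read32(struct drm_i915_private *dev_priv, u32 reg)
    {
            u32 val;

            if (IS_GEN5(dev_priv->dev))
                    ilk_dummy_write(dev_priv); /* WaIssueDummyWriteToWakeupFromRC6 */

            if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
                    unsigned long irqflags;

                    spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
                    if (dev_priv->forcewake_count == 0)
                            dev_priv->gt.force_wake_get(dev_priv);
                    val = readl(dev_priv->regs + reg);
                    if (dev_priv->forcewake_count == 0)
                            dev_priv->gt.force_wake_put(dev_priv);
                    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
            } else {
                    val = readl(dev_priv->regs + reg);
            }
            return val;
    }
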
 
index 9b285da..742206e 100644 (file)
@@ -845,12 +845,12 @@ out:
                 * domain anymore. */
                if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                        i915_gem_clflush_object(obj);
-                       intel_gtt_chipset_flush();
+                       i915_gem_chipset_flush(dev);
                }
        }
 
        if (needs_clflush_after)
-               intel_gtt_chipset_flush();
+               i915_gem_chipset_flush(dev);
 
        return ret;
 }
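
The call sites above switch from intel_gtt_chipset_flush() to the new i915_gem_chipset_flush() inline (defined in the i915_drv.h hunk), which makes the flush a no-op on gen6+ where a chipset-level flush is unnecessary. Usage at a call site is otherwise unchanged:

    /* Sketch: after clflushing CPU writes, flush the chipset write
     * buffers -- which the new wrapper skips entirely on gen6+. */
    static void flush_cpu_writes(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj)
    {
            i915_gem_clflush_object(obj);
            i915_gem_chipset_flush(dev);
    }
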
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        trace_i915_gem_object_fault(obj, page_offset, true, write);
 
        /* Now bind it into the GTT if needed */
-       if (!obj->map_and_fenceable) {
-               ret = i915_gem_object_unbind(obj);
-               if (ret)
-                       goto unlock;
-       }
-       if (!obj->gtt_space) {
-               ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
-               if (ret)
-                       goto unlock;
-
-               ret = i915_gem_object_set_to_gtt_domain(obj, write);
-               if (ret)
-                       goto unlock;
-       }
+       ret = i915_gem_object_pin(obj, 0, true, false);
+       if (ret)
+               goto unlock;
 
-       if (!obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       ret = i915_gem_object_set_to_gtt_domain(obj, write);
+       if (ret)
+               goto unpin;
 
        ret = i915_gem_object_get_fence(obj);
        if (ret)
-               goto unlock;
-
-       if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+               goto unpin;
 
        obj->fault_mappable = true;
 
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+       i915_gem_object_unpin(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 out:
@@ -1707,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages_pin_count)
                return -EBUSY;
 
+       /* ->put_pages might need to allocate memory for the bit17 swizzle
+        * array, so protect the pages from being reaped by removing the
+        * object from the gtt lists early. */
+       list_del(&obj->gtt_list);
+
        ops->put_pages(obj);
        obj->pages = NULL;
 
-       list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
@@ -1868,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *ring,
-                              u32 seqno)
+                              struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
        obj->ring = ring;
@@ -1933,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 seqno = dev_priv->next_seqno;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int ret, i, j;
 
-       /* reserve 0 for non-seqno */
-       if (++dev_priv->next_seqno == 0)
-               dev_priv->next_seqno = 1;
+       /* The hardware uses various monotonic 32-bit counters; if we
+        * detect that they are about to wrap around, we need to idle
+        * the GPU and reset those counters.
+        */
+       ret = 0;
+       for_each_ring(ring, dev_priv, i) {
+               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+                       ret |= ring->sync_seqno[j] != 0;
+       }
+       if (ret == 0)
+               return ret;
+
+       ret = i915_gpu_idle(dev);
+       if (ret)
+               return ret;
+
+       i915_gem_retire_requests(dev);
+       for_each_ring(ring, dev_priv, i) {
+               for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+                       ring->sync_seqno[j] = 0;
+       }
 
-       return seqno;
+       return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-       if (ring->outstanding_lazy_request == 0)
-               ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* reserve 0 for non-seqno */
+       if (dev_priv->next_seqno == 0) {
+               int ret = i915_gem_handle_seqno_wrap(dev);
+               if (ret)
+                       return ret;
 
-       return ring->outstanding_lazy_request;
+               dev_priv->next_seqno = 1;
+       }
+
+       *seqno = dev_priv->next_seqno++;
+       return 0;
 }
 
 int
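Seqno allocation can now fail (idling the GPU to handle wrap may itself fail), hence the new out-parameter signature. A hedged caller sketch:

    u32 seqno;
    int ret = i915_gem_get_seqno(dev, &seqno);
    if (ret)
            return ret;     /* error propagated from i915_gpu_idle() */
    /* seqno is now a non-zero value; 0 stays reserved for "no seqno" */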
@@ -1963,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
        u32 request_ring_position;
-       u32 seqno;
        int was_empty;
        int ret;
 
@@ -1982,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
        if (request == NULL)
                return -ENOMEM;
 
-       seqno = i915_gem_next_request_seqno(ring);
 
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
@@ -1991,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
         */
        request_ring_position = intel_ring_get_tail(ring);
 
-       ret = ring->add_request(ring, &seqno);
+       ret = ring->add_request(ring);
        if (ret) {
                kfree(request);
                return ret;
        }
 
-       trace_i915_gem_request_add(ring, seqno);
-
-       request->seqno = seqno;
+       request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->tail = request_ring_position;
        request->emitted_jiffies = jiffies;
@@ -2017,23 +2034,24 @@ i915_add_request(struct intel_ring_buffer *ring,
                spin_unlock(&file_priv->mm.lock);
        }
 
+       trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
 
        if (!dev_priv->mm.suspended) {
                if (i915_enable_hangcheck) {
                        mod_timer(&dev_priv->hangcheck_timer,
-                                 jiffies +
-                                 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+                                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
                }
                if (was_empty) {
                        queue_delayed_work(dev_priv->wq,
-                                          &dev_priv->mm.retire_work, HZ);
+                                          &dev_priv->mm.retire_work,
+                                          round_jiffies_up_relative(HZ));
                        intel_mark_busy(dev_priv->dev);
                }
        }
 
        if (out_seqno)
-               *out_seqno = seqno;
+               *out_seqno = request->seqno;
        return 0;
 }
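The timer changes here trade exact deadlines for batched wakeups: round_jiffies_up() and round_jiffies_up_relative() align these coarse ~1s timers to whole-second boundaries, so they expire on the same tick as other such timers and an idle CPU can sleep longer. In sketch form:

    /* before: fires at an arbitrary jiffy, all alone */
    mod_timer(&timer, jiffies + msecs_to_jiffies(1500));

    /* after: rounded up to a second boundary shared with other timers */
    mod_timer(&timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
    queue_delayed_work(wq, &work, round_jiffies_up_relative(HZ));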
 
@@ -2131,7 +2149,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
        uint32_t seqno;
-       int i;
 
        if (list_empty(&ring->request_list))
                return;
@@ -2140,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
-       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-               if (seqno >= ring->sync_seqno[i])
-                       ring->sync_seqno[i] = 0;
-
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
 
@@ -2218,7 +2231,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
        /* Come back later if the device is busy... */
        if (!mutex_trylock(&dev->struct_mutex)) {
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+                                  round_jiffies_up_relative(HZ));
                return;
        }
 
@@ -2236,7 +2250,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
        }
 
        if (!dev_priv->mm.suspended && !idle)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+                                  round_jiffies_up_relative(HZ));
        if (idle)
                intel_mark_idle(dev);
 
@@ -2386,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
        ret = to->sync_to(to, from, seqno);
        if (!ret)
-               from->sync_seqno[idx] = seqno;
+               /* We use last_read_seqno because sync_to()
+                * might have just caused seqno wrap under
+                * the radar.
+                */
+               from->sync_seqno[idx] = obj->last_read_seqno;
 
        return ret;
 }
@@ -2469,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-       if (list_empty(&ring->active_list))
-               return 0;
-
-       return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
                if (ret)
                        return ret;
 
-               ret = i915_ring_idle(ring);
+               ret = intel_ring_idle(ring);
                if (ret)
                        return ret;
        }
@@ -2923,13 +2934,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
+       i915_gem_object_pin_pages(obj);
+
  search_free:
        if (map_and_fenceable)
-               free_space =
-                       drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                                         size, alignment, obj->cache_level,
-                                                         0, dev_priv->mm.gtt_mappable_end,
-                                                         false);
+               free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+                                                              size, alignment, obj->cache_level,
+                                                              0, dev_priv->mm.gtt_mappable_end,
+                                                              false);
        else
                free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
                                                      size, alignment, obj->cache_level,
@@ -2937,60 +2949,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        if (free_space != NULL) {
                if (map_and_fenceable)
-                       obj->gtt_space =
+                       free_space =
                                drm_mm_get_block_range_generic(free_space,
                                                               size, alignment, obj->cache_level,
                                                               0, dev_priv->mm.gtt_mappable_end,
                                                               false);
                else
-                       obj->gtt_space =
+                       free_space =
                                drm_mm_get_block_generic(free_space,
                                                         size, alignment, obj->cache_level,
                                                         false);
        }
-       if (obj->gtt_space == NULL) {
+       if (free_space == NULL) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
                                               map_and_fenceable,
                                               nonblocking);
-               if (ret)
+               if (ret) {
+                       i915_gem_object_unpin_pages(obj);
                        return ret;
+               }
 
                goto search_free;
        }
        if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                                             obj->gtt_space,
+                                             free_space,
                                              obj->cache_level))) {
-               drm_mm_put_block(obj->gtt_space);
-               obj->gtt_space = NULL;
+               i915_gem_object_unpin_pages(obj);
+               drm_mm_put_block(free_space);
                return -EINVAL;
        }
 
-
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
-               drm_mm_put_block(obj->gtt_space);
-               obj->gtt_space = NULL;
+               i915_gem_object_unpin_pages(obj);
+               drm_mm_put_block(free_space);
                return ret;
        }
 
-       if (!dev_priv->mm.aliasing_ppgtt)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
-
        list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       obj->gtt_offset = obj->gtt_space->start;
+       obj->gtt_space = free_space;
+       obj->gtt_offset = free_space->start;
 
        fenceable =
-               obj->gtt_space->size == fence_size &&
-               (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+               free_space->size == fence_size &&
+               (free_space->start & (fence_alignment - 1)) == 0;
 
        mappable =
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
        obj->map_and_fenceable = mappable && fenceable;
 
+       i915_gem_object_unpin_pages(obj);
        trace_i915_gem_object_bind(obj, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
@@ -3059,7 +3071,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
                return;
 
        i915_gem_clflush_object(obj);
-       intel_gtt_chipset_flush();
+       i915_gem_chipset_flush(obj->base.dev);
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
 
@@ -3454,11 +3466,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        }
 
        if (obj->gtt_space == NULL) {
+               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
                                                  map_and_fenceable,
                                                  nonblocking);
                if (ret)
                        return ret;
+
+               if (!dev_priv->mm.aliasing_ppgtt)
+                       i915_gem_gtt_bind_object(obj, obj->cache_level);
        }
 
        if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3832,7 +3849,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
        if (!IS_IVYBRIDGE(dev))
                return;
 
-       if (!dev_priv->mm.l3_remap_info)
+       if (!dev_priv->l3_parity.remap_info)
                return;
 
        misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3858,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
 
        for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
                u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-               if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+               if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
                        DRM_DEBUG("0x%x was already programmed to %x\n",
                                  GEN7_L3LOG_BASE + i, remap);
-               if (remap && !dev_priv->mm.l3_remap_info[i/4])
+               if (remap && !dev_priv->l3_parity.remap_info[i/4])
                        DRM_DEBUG_DRIVER("Clearing remapped register\n");
-               I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+               I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
        }
 
        /* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3893,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
                I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
 }
 
-void i915_gem_init_ppgtt(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       uint32_t pd_offset;
-       struct intel_ring_buffer *ring;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-       uint32_t __iomem *pd_addr;
-       uint32_t pd_entry;
-       int i;
-
-       if (!dev_priv->mm.aliasing_ppgtt)
-               return;
-
-
-       pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
-       for (i = 0; i < ppgtt->num_pd_entries; i++) {
-               dma_addr_t pt_addr;
-
-               if (dev_priv->mm.gtt->needs_dmar)
-                       pt_addr = ppgtt->pt_dma_addr[i];
-               else
-                       pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
-               pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
-               pd_entry |= GEN6_PDE_VALID;
-
-               writel(pd_entry, pd_addr + i);
-       }
-       readl(pd_addr);
-
-       pd_offset = ppgtt->pd_offset;
-       pd_offset /= 64; /* in cachelines, */
-       pd_offset <<= 16;
-
-       if (INTEL_INFO(dev)->gen == 6) {
-               uint32_t ecochk, gab_ctl, ecobits;
-
-               ecobits = I915_READ(GAC_ECO_BITS); 
-               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
-               gab_ctl = I915_READ(GAB_CTL);
-               I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
-               ecochk = I915_READ(GAM_ECOCHK);
-               I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
-                                      ECOCHK_PPGTT_CACHE64B);
-               I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-       } else if (INTEL_INFO(dev)->gen >= 7) {
-               I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
-               /* GFX_MODE is per-ring on gen7+ */
-       }
-
-       for_each_ring(ring, dev_priv, i) {
-               if (INTEL_INFO(dev)->gen >= 7)
-                       I915_WRITE(RING_MODE_GEN7(ring),
-                                  _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
-       }
-}
-
 static bool
 intel_enable_blt(struct drm_device *dev)
 {
@@ -3960,7 +3915,7 @@ i915_gem_init_hw(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       if (!intel_enable_gtt())
+       if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
        if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4250,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
                        page_cache_release(page);
                }
        }
-       intel_gtt_chipset_flush();
+       i915_gem_chipset_flush(dev);
 
        obj->phys_obj->cur_obj = NULL;
        obj->phys_obj = NULL;
@@ -4382,7 +4337,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
                        return -EFAULT;
        }
 
-       intel_gtt_chipset_flush();
+       i915_gem_chipset_flush(dev);
        return 0;
 }
 
@@ -4407,6 +4362,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
        spin_unlock(&file_priv->mm.lock);
 }
 
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+       if (!mutex_is_locked(mutex))
+               return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+       return mutex->owner == task;
+#else
+       /* Since UP may be pre-empted, we cannot assume that we own the lock */
+       return false;
+#endif
+}
+
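mutex_is_locked_by() exists so the shrinker below can tell re-entry apart from contention: direct reclaim may be entered from an allocation made while this task already holds struct_mutex, in which case trylock fails yet scanning is safe. The pattern it enables (matching the hunk that follows):

    bool unlock = true;
    if (!mutex_trylock(&dev->struct_mutex)) {
            /* contended by another task: bail out */
            if (!mutex_is_locked_by(&dev->struct_mutex, current))
                    return 0;
            /* re-entered under our own lock: scan, but don't unlock */
            unlock = false;
    }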
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -4417,10 +4385,15 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        struct drm_device *dev = dev_priv->dev;
        struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
+       bool unlock = true;
        int cnt;
 
-       if (!mutex_trylock(&dev->struct_mutex))
-               return 0;
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               if (!mutex_is_locked_by(&dev->struct_mutex, current))
+                       return 0;
+
+               unlock = false;
+       }
 
        if (nr_to_scan) {
                nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4436,6 +4409,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 
-       mutex_unlock(&dev->struct_mutex);
+       if (unlock)
+               mutex_unlock(&dev->struct_mutex);
        return cnt;
 }
index 05ed42f..a3f06bc 100644
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
        struct i915_hw_context *ctx;
        int ret, id;
 
-       ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from_obj != NULL) {
-               u32 seqno = i915_gem_next_request_seqno(ring);
                from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_gem_object_move_to_active(from_obj, ring, seqno);
+               i915_gem_object_move_to_active(from_obj, ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
index 3eea143..ee8f97f 100644
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                         target_i915_obj->cache_level);
        }
 
-       /* The target buffer should have appeared before us in the
-        * exec_object list, so it should have a GTT space bound by now.
-        */
-       if (unlikely(target_offset == 0)) {
-               DRM_DEBUG("No GTT space found for object %d\n",
-                         reloc->target_handle);
-               return ret;
-       }
-
        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
        }
 
        if (flush_domains & I915_GEM_DOMAIN_CPU)
-               intel_gtt_chipset_flush();
+               i915_gem_chipset_flush(ring->dev);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-                                  struct intel_ring_buffer *ring,
-                                  u32 seqno)
+                                  struct intel_ring_buffer *ring)
 {
        struct drm_i915_gem_object *obj;
 
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                obj->base.write_domain = obj->base.pending_write_domain;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-               i915_gem_object_move_to_active(obj, ring, seqno);
+               i915_gem_object_move_to_active(obj, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = seqno;
+                       obj->last_write_seqno = intel_ring_get_seqno(ring);
                        if (obj->pin_count) /* check for potential scanout */
                                intel_mark_fb_busy(obj);
                }
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct intel_ring_buffer *ring;
        u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start, exec_len;
-       u32 seqno;
        u32 mask;
+       u32 flags;
        int ret, mode, i;
 
        if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
+       flags = 0;
+       if (args->flags & I915_EXEC_SECURE) {
+               if (!file->is_master || !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               flags |= I915_DISPATCH_SECURE;
+       }
+
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
@@ -983,26 +981,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+       /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+        * batch" bit. Hence we need to pin secure batches into the global gtt.
+        * hsw should have this fixed, but let's be paranoid and do it
+        * unconditionally for now. */
+       if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
+               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;
 
-       seqno = i915_gem_next_request_seqno(ring);
-       for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
-               if (seqno < ring->sync_seqno[i]) {
-                       /* The GPU can not handle its semaphore value wrapping,
-                        * so every billion or so execbuffers, we need to stall
-                        * the GPU in order to reset the counters.
-                        */
-                       ret = i915_gpu_idle(dev);
-                       if (ret)
-                               goto err;
-                       i915_gem_retire_requests(dev);
-
-                       BUG_ON(ring->sync_seqno[i]);
-               }
-       }
-
        ret = i915_switch_context(ring, file, ctx_id);
        if (ret)
                goto err;
@@ -1028,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
        }
 
-       trace_i915_gem_ring_dispatch(ring, seqno);
-
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
@@ -1040,17 +1027,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                goto err;
 
                        ret = ring->dispatch_execbuffer(ring,
-                                                       exec_start, exec_len);
+                                                       exec_start, exec_len,
+                                                       flags);
                        if (ret)
                                goto err;
                }
        } else {
-               ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+               ret = ring->dispatch_execbuffer(ring,
+                                               exec_start, exec_len,
+                                               flags);
                if (ret)
                        goto err;
        }
 
-       i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+       trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+       i915_gem_execbuffer_move_to_active(&objects, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
index df470b5..2c150de 100644
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID                 (1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID                 (1 << 0)
+#define GEN6_PTE_UNCACHED              (1 << 1)
+#define HSW_PTE_UNCACHED               (0)
+#define GEN6_PTE_CACHE_LLC             (2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+                                  dma_addr_t addr,
+                                  enum i915_cache_level level)
+{
+       gtt_pte_t pte = GEN6_PTE_VALID;
+       pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+       switch (level) {
+       case I915_CACHE_LLC_MLC:
+               /* Haswell doesn't set L3 this way */
+               if (IS_HASWELL(dev))
+                       pte |= GEN6_PTE_CACHE_LLC;
+               else
+                       pte |= GEN6_PTE_CACHE_LLC_MLC;
+               break;
+       case I915_CACHE_LLC:
+               pte |= GEN6_PTE_CACHE_LLC;
+               break;
+       case I915_CACHE_NONE:
+               if (IS_HASWELL(dev))
+                       pte |= HSW_PTE_UNCACHED;
+               else
+                       pte |= GEN6_PTE_UNCACHED;
+               break;
+       default:
+               BUG();
+       }
+
+       return pte;
+}
+
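A worked encoding example for pte_encode() (illustrative address; non-Haswell gen6+, I915_CACHE_LLC):

    /*
     * dma_addr = 0x1_2345_6000 (a 40-bit address, bit 32 set)
     *
     *   low 32 bits of addr                    0x23456000
     *   bits 39:32 packed into PTE bits 11:4   (addr >> 28) & 0xff0 = 0x010
     *   GEN6_PTE_CACHE_LLC                     0x004
     *   GEN6_PTE_VALID                         0x001
     *   -------------------------------------------------
     *   pte                                  = 0x23456015
     */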
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
 {
-       uint32_t *pt_vaddr;
-       uint32_t scratch_pte;
+       gtt_pte_t *pt_vaddr;
+       gtt_pte_t scratch_pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
-       scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+       scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
+                                I915_CACHE_LLC);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        if (!ppgtt)
                return ret;
 
+       ppgtt->dev = dev;
        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        i915_ppgtt_clear_range(ppgtt, 0,
                               ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
-       ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+       ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
        dev_priv->mm.aliasing_ppgtt = ppgtt;
 
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
                                         const struct sg_table *pages,
                                         unsigned first_entry,
-                                        uint32_t pte_flags)
+                                        enum i915_cache_level cache_level)
 {
-       uint32_t *pt_vaddr, pte;
+       gtt_pte_t *pt_vaddr;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 
                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-                       pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-                       pt_vaddr[j] = pte | pte_flags;
+                       pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+                                                cache_level);
 
                        /* grab the next page */
                        if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
 {
-       uint32_t pte_flags = GEN6_PTE_VALID;
-
-       switch (cache_level) {
-       case I915_CACHE_LLC_MLC:
-               pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
-               break;
-       case I915_CACHE_LLC:
-               pte_flags |= GEN6_PTE_CACHE_LLC;
-               break;
-       case I915_CACHE_NONE:
-               if (IS_HASWELL(obj->base.dev))
-                       pte_flags |= HSW_PTE_UNCACHED;
-               else
-                       pte_flags |= GEN6_PTE_UNCACHED;
-               break;
-       default:
-               BUG();
-       }
-
        i915_ppgtt_insert_sg_entries(ppgtt,
                                     obj->pages,
                                     obj->gtt_space->start >> PAGE_SHIFT,
-                                    pte_flags);
+                                    cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                               obj->base.size >> PAGE_SHIFT);
 }
 
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-                                           enum i915_cache_level cache_level)
+void i915_gem_init_ppgtt(struct drm_device *dev)
 {
-       switch (cache_level) {
-       case I915_CACHE_LLC_MLC:
-               if (INTEL_INFO(dev)->gen >= 6)
-                       return AGP_USER_CACHED_MEMORY_LLC_MLC;
-               /* Older chipsets do not have this extra level of CPU
-                * cacheing, so fallthrough and request the PTE simply
-                * as cached.
-                */
-       case I915_CACHE_LLC:
-               return AGP_USER_CACHED_MEMORY;
-       default:
-       case I915_CACHE_NONE:
-               return AGP_USER_MEMORY;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       uint32_t pd_offset;
+       struct intel_ring_buffer *ring;
+       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       uint32_t __iomem *pd_addr;
+       uint32_t pd_entry;
+       int i;
+
+       if (!dev_priv->mm.aliasing_ppgtt)
+               return;
+
+       pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+       for (i = 0; i < ppgtt->num_pd_entries; i++) {
+               dma_addr_t pt_addr;
+
+               if (dev_priv->mm.gtt->needs_dmar)
+                       pt_addr = ppgtt->pt_dma_addr[i];
+               else
+                       pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+               pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+               pd_entry |= GEN6_PDE_VALID;
+
+               writel(pd_entry, pd_addr + i);
+       }
+       readl(pd_addr);
+
+       pd_offset = ppgtt->pd_offset;
+       pd_offset /= 64; /* in cachelines, */
+       pd_offset <<= 16;
+
+       if (INTEL_INFO(dev)->gen == 6) {
+               uint32_t ecochk, gab_ctl, ecobits;
+
+               ecobits = I915_READ(GAC_ECO_BITS);
+               I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+               gab_ctl = I915_READ(GAB_CTL);
+               I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+               ecochk = I915_READ(GAM_ECOCHK);
+               I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+                                      ECOCHK_PPGTT_CACHE64B);
+               I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+       } else if (INTEL_INFO(dev)->gen >= 7) {
+               I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+               /* GFX_MODE is per-ring on gen7+ */
+       }
+
+       for_each_ring(ring, dev_priv, i) {
+               if (INTEL_INFO(dev)->gen >= 7)
+                       I915_WRITE(RING_MODE_GEN7(ring),
+                                  _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+
+               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+               I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
        }
 }
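The pd_offset shifting above is easiest to see with numbers (illustrative offset, not from the patch):

    /*
     * pd_offset = 0x40000 bytes into the GTT
     *   0x40000 / 64  = 0x1000        (units of 64-byte cachelines)
     *   0x1000 << 16  = 0x10000000    (cacheline index at bits 31:16)
     * which is the value written to RING_PP_DIR_BASE for each ring.
     */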
 
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+                                unsigned first_entry,
+                                unsigned num_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       gtt_pte_t scratch_pte;
+       gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+       int i;
+
+       if (INTEL_INFO(dev)->gen < 6) {
+               intel_gtt_clear_range(first_entry, num_entries);
+               return;
+       }
+
+       if (WARN(num_entries > max_entries,
+                "First entry = %d; Num entries = %d (max=%d)\n",
+                first_entry, num_entries, max_entries))
+               num_entries = max_entries;
+
+       scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+       for (i = 0; i < num_entries; i++)
+               iowrite32(scratch_pte, &gtt_base[i]);
+       readl(gtt_base);
+}
+
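A note on why cleared ranges are pointed at the scratch page rather than simply invalidated (rationale inferred from the code, not stated in this diff): a stray or prefetching GPU access through a cleared entry then lands on one known, harmless page instead of whatever the stale PTE used to map.

    /* every cleared entry maps the same dedicated scratch page */
    scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma,
                             I915_CACHE_LLC);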
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        /* First fill our portion of the GTT with scratch pages */
-       intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+       i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }
 
-       intel_gtt_chipset_flush();
+       i915_gem_chipset_flush(dev);
 }
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/*
+ * Binds an object into the global GTT with the specified cache level. The
+ * object becomes accessible to the GPU via commands whose operands reference
+ * offsets within the global GTT, and to software through the GMADR mapped
+ * BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+                                 enum i915_cache_level level)
+{
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct sg_table *st = obj->pages;
+       struct scatterlist *sg = st->sgl;
+       const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+       gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+       int unused, i = 0;
+       unsigned int len, m = 0;
+       dma_addr_t addr;
+
+       for_each_sg(st->sgl, sg, st->nents, unused) {
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (m = 0; m < len; m++) {
+                       addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+                       iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+                       i++;
+               }
+       }
+
+       BUG_ON(i > max_entries);
+       BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+       /* XXX: This serves as a posting read to make sure that the PTE has
+        * actually been updated. There is some concern that even though
+        * registers and PTEs are within the same BAR, they may be subject
+        * to different (NUMA-like) access patterns. Therefore, even with
+        * the way we assume the hardware should work, we must keep this
+        * posting read for paranoia.
+        */
+       if (i != 0)
+               WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+       /* This next bit makes the above posting read even more important. We
+        * want to flush the TLBs only after we're certain all the PTE updates
+        * have finished.
+        */
+       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+       if (INTEL_INFO(dev)->gen < 6) {
+               unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+                       AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+               intel_gtt_insert_sg_entries(obj->pages,
+                                           obj->gtt_space->start >> PAGE_SHIFT,
+                                           flags);
+       } else {
+               gen6_ggtt_bind_object(obj, cache_level);
+       }
 
-       intel_gtt_insert_sg_entries(obj->pages,
-                                   obj->gtt_space->start >> PAGE_SHIFT,
-                                   agp_type);
        obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-       intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+       i915_ggtt_clear_range(obj->base.dev,
+                             obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);
 
        obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
        /* ... but ensure that we clear the entire range. */
-       intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+       i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct page *page;
+       dma_addr_t dma_addr;
+
+       page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+       if (page == NULL)
+               return -ENOMEM;
+       get_page(page);
+       set_pages_uc(page, 1);
+
+#ifdef CONFIG_INTEL_IOMMU
+       dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+                               PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(dev->pdev, dma_addr))
+               return -EINVAL;
+#else
+       dma_addr = page_to_phys(page);
+#endif
+       dev_priv->mm.gtt->scratch_page = page;
+       dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+       return 0;
+}
+
+static void teardown_scratch_page(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
+       pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
+                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       put_page(dev_priv->mm.gtt->scratch_page);
+       __free_page(dev_priv->mm.gtt->scratch_page);
+}
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+       snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+       snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+       return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+       snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+       snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+       return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+       static const int stolen_decoder[] = {
+               0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+       snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+       snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+       return stolen_decoder[snb_gmch_ctl] << 20;
+}
+
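Worked decodes for the GMCH control fields (field values assumed for illustration):

    /*
     * gen6_get_total_gtt_size(), GGMS = 2:
     *   2 << 20 = 2MB of GTT = 2MB / sizeof(gtt_pte_t) = 512Ki PTEs,
     *   each mapping a 4KiB page -> 2GiB of GTT address space.
     *
     * gen6_get_stolen_size(), GMS = 3 (32 MB units):
     *   3 << 25 = 96MB of stolen memory.
     */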
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       phys_addr_t gtt_bus_addr;
+       u16 snb_gmch_ctl;
+       int ret;
+
+       /* On modern platforms we need not worry ourselves with the legacy
+        * hostbridge query stuff. Skip it entirely.
+        */
+       if (INTEL_INFO(dev)->gen < 6) {
+               ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+               if (!ret) {
+                       DRM_ERROR("failed to set up gmch\n");
+                       return -EIO;
+               }
+
+               dev_priv->mm.gtt = intel_gtt_get();
+               if (!dev_priv->mm.gtt) {
+                       DRM_ERROR("Failed to initialize GTT\n");
+                       intel_gmch_remove();
+                       return -ENODEV;
+               }
+               return 0;
+       }
+
+       dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+       if (!dev_priv->mm.gtt)
+               return -ENOMEM;
+
+       if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
+               pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
+
+#ifdef CONFIG_INTEL_IOMMU
+       dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+       /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+       gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+       dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+       /* i9xx_setup */
+       pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+       dev_priv->mm.gtt->gtt_total_entries =
+               gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+       if (INTEL_INFO(dev)->gen < 7)
+               dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+       else
+               dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+       dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+       /* 64/512MB is the current min/max we actually know of, but this is just a
+        * coarse sanity check.
+        */
+       if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+           dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+               DRM_ERROR("Unknown GMADR entries (%d)\n",
+                         dev_priv->mm.gtt->gtt_mappable_entries);
+               ret = -ENXIO;
+               goto err_out;
+       }
+
+       ret = setup_scratch_page(dev);
+       if (ret) {
+               DRM_ERROR("Scratch setup failed\n");
+               goto err_out;
+       }
+
+       dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
+                                          dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+       if (!dev_priv->mm.gtt->gtt) {
+               DRM_ERROR("Failed to map the gtt page table\n");
+               teardown_scratch_page(dev);
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
+       /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+       DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+       DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+       DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+       return 0;
+
+err_out:
+       kfree(dev_priv->mm.gtt);
+       if (INTEL_INFO(dev)->gen < 6)
+               intel_gmch_remove();
+       return ret;
+}
+
+void i915_gem_gtt_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       iounmap(dev_priv->mm.gtt->gtt);
+       teardown_scratch_page(dev);
+       if (INTEL_INFO(dev)->gen < 6)
+               intel_gmch_remove();
+       kfree(dev_priv->mm.gtt);
 }
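A hedged sketch of how the new init/fini pair is expected to be called from the driver load and unload paths (the call sites are outside this diff):

    /* load: probe the GTT (gmch on pre-gen6, direct PTE setup on gen6+) */
    ret = i915_gem_gtt_init(dev);
    if (ret)
            goto err;

    /* unload: unmap the PTE range, release the scratch page */
    i915_gem_gtt_fini(dev);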
index 32e1bda..a4dc97f 100644
@@ -122,7 +122,10 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
+
+       return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
 }
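On platforms where pipe timings can live on a shared CPU transcoder (presumably the eDP transcoder on newer parts; the helper itself is outside this diff), timing registers must be indexed by transcoder rather than pipe. Elsewhere the mapping is identity, so the lookups below are a no-op refactor there. The pattern:

    enum transcoder cpu_transcoder =
            intel_pipe_to_cpu_transcoder(dev_priv, pipe);
    vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);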
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
 
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
        }
 
        /* Get vtotal. */
-       vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+       vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
 
        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-               htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+               htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }
 
        /* Query vblank area. */
-       vbl = I915_READ(VBLANK(pipe));
+       vbl = I915_READ(VBLANK(cpu_transcoder));
 
        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
        if (i915_enable_hangcheck) {
                dev_priv->hangcheck_count = 0;
                mod_timer(&dev_priv->hangcheck_timer,
-                         jiffies +
-                         msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+                         round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
        }
 }
 
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
        if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
                return;
 
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->rps.hw_lock);
 
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
                new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
                gen6_set_rps(dev_priv->dev, new_delay);
        }
 
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 static void ivybridge_parity_work(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-                                                   parity_error_work);
+                                                   l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
-       queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+       queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
 static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
        queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
-static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
+       if (pch_iir & SDE_HOTPLUG_MASK)
+               queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
        if (pch_iir & SDE_AUDIO_POWER_MASK)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
 
+       if (pch_iir & SDE_HOTPLUG_MASK_CPT)
+               queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                                         I915_READ(FDI_RX_IIR(pipe)));
 }
 
-static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
                if (de_iir & DE_PCH_EVENT_IVB) {
                        u32 pch_iir = I915_READ(SDEIIR);
 
-                       if (pch_iir & SDE_HOTPLUG_MASK_CPT)
-                               queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                        cpt_irq_handler(dev, pch_iir);
 
                        /* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
                notify_ring(dev, &dev_priv->ring[VCS]);
 }
 
-static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
        u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
-       u32 hotplug_mask;
 
        atomic_inc(&dev_priv->irq_received);
 
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
            (!IS_GEN6(dev) || pm_iir == 0))
                goto done;
 
-       if (HAS_PCH_CPT(dev))
-               hotplug_mask = SDE_HOTPLUG_MASK_CPT;
-       else
-               hotplug_mask = SDE_HOTPLUG_MASK;
-
        ret = IRQ_HANDLED;
 
        if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 
        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
-               if (pch_iir & hotplug_mask)
-                       queue_work(dev_priv->wq, &dev_priv->hotplug_work);
                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
                        = I915_READ(RING_SYNC_0(ring->mmio_base));
                error->semaphore_mboxes[ring->id][1]
                        = I915_READ(RING_SYNC_1(ring->mmio_base));
+               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
        }
 
        if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
 
-       if (work == NULL || work->pending || !work->enable_stall_check) {
+       if (work == NULL ||
+           atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+           !work->enable_stall_check) {
                /* Either the pending flip IRQ arrived, or we're too early. Don't check */
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
@@ -1751,7 +1755,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 repeat:
        /* Reset the timer in case the chip hangs without another request being added */
        mod_timer(&dev_priv->hangcheck_timer,
-                 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
 
 /* drm_dma.h hooks
@@ -1956,6 +1960,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        u32 enable_mask;
        u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+       u32 render_irqs;
        u16 msid;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2000,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
 
-       dev_priv->gt_irq_mask = ~0;
-
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
-                  GT_GEN6_BLT_CS_ERROR_INTERRUPT |
-                  GT_GEN6_BLT_USER_INTERRUPT |
-                  GT_GEN6_BSD_USER_INTERRUPT |
-                  GT_GEN6_BSD_CS_ERROR_INTERRUPT |
-                  GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
-                  GT_PIPE_NOTIFY |
-                  GT_RENDER_CS_ERROR_INTERRUPT |
-                  GT_SYNC_STATUS |
-                  GT_USER_INTERRUPT);
+
+       render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
+               GEN6_BLITTER_USER_INTERRUPT;
+       I915_WRITE(GTIER, render_irqs);
        POSTING_READ(GTIER);
 
        /* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2015,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 #endif
 
        I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
-#if 0 /* FIXME: check register definitions; some have moved */
        /* Note HDMI and DP share bits */
        if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2022,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
                hotplug_en |= HDMIC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
                hotplug_en |= HDMID_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+       if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOC_HOTPLUG_INT_EN;
-       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+       if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
                hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
                hotplug_en |= CRT_HOTPLUG_INT_EN;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
        }
-#endif
 
        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
@@ -2129,7 +2123,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2301,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2539,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
        return 0;
 }
 
-static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2685,7 @@ void intel_irq_init(struct drm_device *dev)
        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->error_work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
-       INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
+       INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
index a4162dd..3f75cfa 100644
@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
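
The new _TRANSCODER() helper uses the same linear-extrapolation trick as
_PIPE() and _PORT(): given the addresses of instance 0 and instance 1,
instance n is computed by stepping the difference. Worked out as a
sketch (the 0x1000 spacing is the usual per-transcoder stride assumed
here):

    /* _TRANSCODER(tran, a, b) == (a) + (tran) * ((b) - (a)), so with
     * registers spaced 0x1000 apart:
     *   HTOTAL(0) -> _HTOTAL_A
     *   HTOTAL(1) -> _HTOTAL_B          (= _HTOTAL_A + 0x1000)
     *   HTOTAL(2) -> _HTOTAL_A + 0x2000 (third transcoder)
     */
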
 
  */
 #define INTEL_GMCH_CTRL                0x52
 #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
+#define SNB_GMCH_CTRL          0x50
+#define    SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define    SNB_GMCH_GGMS_MASK  0x3
+#define    SNB_GMCH_GMS_SHIFT   3 /* Graphics Mode Select */
+#define    SNB_GMCH_GMS_MASK    0x1f
+#define    IVB_GMCH_GMS_SHIFT   4
+#define    IVB_GMCH_GMS_MASK    0xf
+
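
The SNB_GMCH_CTRL fields added above are decoded with plain
shift-and-mask reads from PCI config space. A minimal sketch, assuming
the word is fetched with pci_read_config_word() and leaving the
size-encoding interpretation to the caller:

    u16 gmch_ctrl;

    pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &gmch_ctrl);
    /* GTT graphics memory size field */
    ggms = (gmch_ctrl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK;
    /* graphics mode select (stolen memory size index) */
    gms  = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
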
 
 /* PCI config space */
 
 #define  GEN6_GRDOM_MEDIA              (1 << 2)
 #define  GEN6_GRDOM_BLT                        (1 << 3)
 
-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
-
-#define GEN6_PDE_VALID                 (1 << 0)
-#define GEN6_PDE_LARGE_PAGE            (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
-
-#define GEN6_PTE_VALID                 (1 << 0)
-#define GEN6_PTE_UNCACHED              (1 << 1)
-#define HSW_PTE_UNCACHED               (0)
-#define GEN6_PTE_CACHE_LLC             (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
-#define GEN6_PTE_CACHE_BITS            (3 << 1)
-#define GEN6_PTE_GFDT                  (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
-
 #define RING_PP_DIR_BASE(ring)         ((ring)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(ring)    ((ring)->mmio_base+0x518)
 #define RING_PP_DIR_DCLV(ring)         ((ring)->mmio_base+0x220)
  */
 #define MI_LOAD_REGISTER_IMM(x)        MI_INSTR(0x22, 2*x-1)
 #define MI_FLUSH_DW            MI_INSTR(0x26, 1) /* for GEN6 */
-#define   MI_INVALIDATE_TLB    (1<<18)
-#define   MI_INVALIDATE_BSD    (1<<7)
+#define   MI_FLUSH_DW_STORE_INDEX      (1<<21)
+#define   MI_INVALIDATE_TLB            (1<<18)
+#define   MI_FLUSH_DW_OP_STOREDW       (1<<14)
+#define   MI_INVALIDATE_BSD            (1<<7)
+#define   MI_FLUSH_DW_USE_GTT          (1<<2)
+#define   MI_FLUSH_DW_USE_PPGTT                (0<<2)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
-#define   MI_BATCH_NON_SECURE  (1)
-#define   MI_BATCH_NON_SECURE_I965 (1<<8)
+#define   MI_BATCH_NON_SECURE          (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965     (1<<8)
+#define   MI_BATCH_PPGTT_HSW           (1<<8)
+#define   MI_BATCH_NON_SECURE_HSW      (1<<13)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT             (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6+ */
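
The MI_FLUSH_DW bits above make the post-sync write explicit:
STORE_INDEX plus OP_STOREDW asks the flush to write a dword at an index
into the status page, and USE_GTT/USE_PPGTT selects the address space.
A hedged sketch of emitting such a flush on a gen6+ ring (scratch_addr
and the four-dword layout are illustrative):

    intel_ring_emit(ring, MI_FLUSH_DW | MI_FLUSH_DW_STORE_INDEX |
                          MI_FLUSH_DW_OP_STOREDW);
    intel_ring_emit(ring, scratch_addr | MI_FLUSH_DW_USE_GTT);
    intel_ring_emit(ring, 0);       /* value for the post-sync write */
    intel_ring_emit(ring, MI_NOOP);
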
 #define   DPIO_PLL_MODESEL_SHIFT       24 /* 3 bits */
 #define   DPIO_BIAS_CURRENT_CTL_SHIFT  21 /* 3 bits, always 0x7 */
 #define   DPIO_PLL_REFCLK_SEL_SHIFT    16 /* 2 bits */
+#define   DPIO_PLL_REFCLK_SEL_MASK     3
 #define   DPIO_DRIVER_CTL_SHIFT                12 /* always set to 0x8 */
 #define   DPIO_CLK_BIAS_CTL_SHIFT      8 /* always set to 0x5 */
 #define _DPIO_REFSFR_B                 0x8034
 
 #define DPIO_FASTCLK_DISABLE           0x8100
 
+#define DPIO_DATA_CHANNEL1             0x8220
+#define DPIO_DATA_CHANNEL2             0x8420
+
 /*
  * Fence registers
  */
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
 #define _3D_CHICKEN3   0x02090
+#define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
 
 #define MI_MODE                0x0209c
 #define IIR            0x020a4
 #define IMR            0x020a8
 #define ISR            0x020ac
+#define VLV_GUNIT_CLOCK_GATE   0x182060
+#define   GCFG_DIS             (1<<8)
 #define VLV_IIR_RW     0x182084
 #define VLV_IER                0x1820a0
 #define VLV_IIR                0x1820a4
 #define   MI_ARB_DISPLAY_PRIORITY_B_A          (1 << 0)        /* display B > display A */
 
 #define CACHE_MODE_0   0x02120 /* 915+ only */
+#define   CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
 #define          CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
 #define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define BB_ADDR                0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL  0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6     0x101008
+#define   GFX_FLSH_CNTL_EN     (1<<0)
 #define ECOSKPD                0x021d0
 #define   ECO_GATING_CX_ONLY   (1<<3)
 #define   ECO_FLIP_DONE                (1<<0)
 #define _VSYNCSHIFT_B  0x61028
 
 
-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
 /* VGA port control */
 #define ADPA                   0x61100
 #define   PIPECONF_GAMMA               (1<<24)
 #define   PIPECONF_FORCE_BORDER        (1<<25)
 #define   PIPECONF_INTERLACE_MASK      (7 << 21)
+#define   PIPECONF_INTERLACE_MASK_HSW  (3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
  * fitting must be disabled on pre-ilk for interlaced. */
 #define   PIPECONF_PROGRESSIVE                 (0 << 21)
 #define   PIPE_12BPC                           (3 << 5)
 
 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
 #define PIPEDSL(pipe)  _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
 #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
 #define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
 #define   DISPPLANE_GAMMA_ENABLE               (1<<30)
 #define   DISPPLANE_GAMMA_DISABLE              0
 #define   DISPPLANE_PIXFORMAT_MASK             (0xf<<26)
+#define   DISPPLANE_YUV422                     (0x0<<26)
 #define   DISPPLANE_8BPP                       (0x2<<26)
-#define   DISPPLANE_15_16BPP                   (0x4<<26)
-#define   DISPPLANE_16BPP                      (0x5<<26)
-#define   DISPPLANE_32BPP_NO_ALPHA             (0x6<<26)
-#define   DISPPLANE_32BPP                      (0x7<<26)
-#define   DISPPLANE_32BPP_30BIT_NO_ALPHA       (0xa<<26)
+#define   DISPPLANE_BGRA555                    (0x3<<26)
+#define   DISPPLANE_BGRX555                    (0x4<<26)
+#define   DISPPLANE_BGRX565                    (0x5<<26)
+#define   DISPPLANE_BGRX888                    (0x6<<26)
+#define   DISPPLANE_BGRA888                    (0x7<<26)
+#define   DISPPLANE_RGBX101010                 (0x8<<26)
+#define   DISPPLANE_RGBA101010                 (0x9<<26)
+#define   DISPPLANE_BGRX101010                 (0xa<<26)
+#define   DISPPLANE_RGBX161616                 (0xc<<26)
+#define   DISPPLANE_RGBX888                    (0xe<<26)
+#define   DISPPLANE_RGBA888                    (0xf<<26)
 #define   DISPPLANE_STEREO_ENABLE              (1<<25)
 #define   DISPPLANE_STEREO_DISABLE             0
 #define   DISPPLANE_SEL_PIPE_SHIFT             24
 #define _DSPASIZE              0x70190
 #define _DSPASURF              0x7019C /* 965+ only */
 #define _DSPATILEOFF           0x701A4 /* 965+ only */
+#define _DSPAOFFSET            0x701A4 /* HSW */
+#define _DSPASURFLIVE          0x701AC
 
 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
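
The DISPPLANE pixel-format values above replace the old bpp-oriented
names with explicit channel layouts, which line up naturally with drm
fourcc codes. A sketch of the kind of mapping a plane-setup path would
do (subset only; the pairing shown is an assumption of this note):

    switch (fb->pixel_format) {
    case DRM_FORMAT_RGB565:
            dspcntr |= DISPPLANE_BGRX565;
            break;
    case DRM_FORMAT_XRGB8888:
            dspcntr |= DISPPLANE_BGRX888;
            break;
    default:
            return -EINVAL;         /* unhandled format */
    }
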
 
 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK     (0xfffff000)
 #define _DSPBSIZE              0x71190
 #define _DSPBSURF              0x7119C
 #define _DSPBTILEOFF           0x711A4
+#define _DSPBOFFSET            0x711A4
+#define _DSPBSURFLIVE          0x711AC
 
 /* Sprite A control */
 #define _DVSACNTR              0x72180
 #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
 #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
 #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
 #define _SPRA_CTL              0x70280
 #define   SPRITE_ENABLE                        (1<<31)
 #define _SPRA_SURF             0x7029c
 #define _SPRA_KEYMAX           0x702a0
 #define _SPRA_TILEOFF          0x702a4
+#define _SPRA_OFFSET           0x702a4
+#define _SPRA_SURFLIVE         0x702ac
 #define _SPRA_SCALE            0x70304
 #define   SPRITE_SCALE_ENABLE  (1<<31)
 #define   SPRITE_FILTER_MASK   (3<<29)
 #define _SPRB_SURF             0x7129c
 #define _SPRB_KEYMAX           0x712a0
 #define _SPRB_TILEOFF          0x712a4
+#define _SPRB_OFFSET           0x712a4
+#define _SPRB_SURFLIVE         0x712ac
 #define _SPRB_SCALE            0x71304
 #define _SPRB_GAMC             0x71400
 
 #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
 #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
 #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
 #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
 /* VBIOS regs */
 #define VGACNTRL               0x71400
 #define DISPLAY_PORT_PLL_BIOS_1         0x46010
 #define DISPLAY_PORT_PLL_BIOS_2         0x46014
 
-#define PCH_DSPCLK_GATE_D      0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE           (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE          (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE           (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE          (1 << 5)
-
 #define PCH_3DCGDIS0           0x46020
 # define MARIUNIT_CLOCK_GATE_DISABLE           (1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE           (1 << 1)
 #define _PIPEB_LINK_M2           0x61048
 #define _PIPEB_LINK_N2           0x6104c
 
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 
 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1               0x68080
 #define _PFB_CTL_1               0x68880
 #define  PF_ENABLE              (1<<31)
+#define  PF_PIPE_SEL_MASK_IVB  (3<<29)
+#define  PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
 #define  PF_FILTER_MASK                (3<<23)
 #define  PF_FILTER_PROGRAMMED  (0<<23)
 #define  PF_FILTER_MED_3x3     (1<<23)
 #define  ILK_HDCP_DISABLE              (1<<25)
 #define  ILK_eDP_A_DISABLE             (1<<24)
 #define  ILK_DESKTOP                   (1<<23)
-#define ILK_DSPCLK_GATE                0x42020
-#define  IVB_VRHUNIT_CLK_GATE  (1<<28)
-#define  ILK_DPARB_CLK_GATE    (1<<5)
-#define  ILK_DPFD_CLK_GATE     (1<<7)
 
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define   ILK_CLK_FBC          (1<<7)
-#define   ILK_DPFC_DIS1                (1<<8)
-#define   ILK_DPFC_DIS2                (1<<9)
+#define ILK_DSPCLK_GATE_D                      0x42020
+#define   ILK_VRHUNIT_CLOCK_GATE_DISABLE       (1 << 28)
+#define   ILK_DPFCUNIT_CLOCK_GATE_DISABLE      (1 << 9)
+#define   ILK_DPFCRUNIT_CLOCK_GATE_DISABLE     (1 << 8)
+#define   ILK_DPFDUNIT_CLOCK_GATE_ENABLE       (1 << 7)
+#define   ILK_DPARBUNIT_CLOCK_GATE_ENABLE      (1 << 5)
 
 #define IVB_CHICKEN3   0x4200c
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE     (1 << 5)
 
 #define GEN7_L3CNTLREG1                                0xB01C
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C4FFF8C
+#define  GEN7_L3AGDIS                          (1<<19)
 
 #define GEN7_L3_CHICKEN_MODE_REGISTER          0xB030
 #define  GEN7_WA_L3_CHICKEN_MODE                               0x20000000
 
+#define GEN7_L3SQCREG4                         0xb034
+#define  L3SQ_URB_READ_CAM_MATCH_DISABLE       (1<<27)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
 
+#define HSW_FUSE_STRAP         0x42014
+#define  HSW_CDCLK_LIMIT       (1 << 24)
+
 /* PCH */
 
 /* south display engine interrupt: IBX */
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
-#define VLV_VIDEO_DIP_CTL_A            0x60220
+#define VLV_VIDEO_DIP_CTL_A            0x60200
 #define VLV_VIDEO_DIP_DATA_A           0x60208
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A   0x60210
 
 #define  TRANS_6BPC             (2<<5)
 #define  TRANS_12BPC            (3<<5)
 
+#define _TRANSA_CHICKEN1        0xf0060
+#define _TRANSB_CHICKEN1        0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define  TRANS_CHICKEN1_DP0UNIT_GC_DISABLE     (1<<4)
 #define _TRANSA_CHICKEN2        0xf0064
 #define _TRANSB_CHICKEN2        0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define   TRANS_AUTOTRAIN_GEN_STALL_DIS        (1<<31)
+#define  TRANS_CHICKEN2_TIMING_OVERRIDE                (1<<31)
+
 
 #define SOUTH_CHICKEN1         0xc2000
 #define  FDIA_PHASE_SYNC_SHIFT_OVR     19
 #define  FDIA_PHASE_SYNC_SHIFT_EN      18
-#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define  FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define  FDI_BC_BIFURCATION_SELECT     (1 << 12)
 #define SOUTH_CHICKEN2         0xc2004
-#define  DPLS_EDP_PPS_FIX_DIS  (1<<0)
+#define  FDI_MPHY_IOSFSB_RESET_STATUS  (1<<13)
+#define  FDI_MPHY_IOSFSB_RESET_CTL     (1<<12)
+#define  DPLS_EDP_PPS_FIX_DIS          (1<<0)
 
 #define _FDI_RXA_CHICKEN         0xc200c
 #define _FDI_RXB_CHICKEN         0xc2010
 
 #define SOUTH_DSPCLK_GATE_D    0xc2020
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL             0x60100
 #define  FDI_FS_ERRC_ENABLE            (1<<27)
 #define  FDI_FE_ERRC_ENABLE            (1<<26)
 #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
+#define  FDI_RX_POLARITY_REVERSED_LPT  (1<<16)
 #define  FDI_8BPC                       (0<<16)
 #define  FDI_10BPC                      (1<<16)
 #define  FDI_6BPC                       (2<<16)
 #define  FDI_PORT_WIDTH_2X_LPT                 (1<<19)
 #define  FDI_PORT_WIDTH_1X_LPT                 (0<<19)
 
-#define _FDI_RXA_MISC            0xf0010
-#define _FDI_RXB_MISC            0xf1010
+#define _FDI_RXA_MISC                  0xf0010
+#define _FDI_RXB_MISC                  0xf1010
+#define  FDI_RX_PWRDN_LANE1_MASK       (3<<26)
+#define  FDI_RX_PWRDN_LANE1_VAL(x)     ((x)<<26)
+#define  FDI_RX_PWRDN_LANE0_MASK       (3<<24)
+#define  FDI_RX_PWRDN_LANE0_VAL(x)     ((x)<<24)
+#define  FDI_RX_TP1_TO_TP2_48          (2<<20)
+#define  FDI_RX_TP1_TO_TP2_64          (3<<20)
+#define  FDI_RX_FDI_DELAY_90           (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
 #define _FDI_RXA_TUSIZE1         0xf0030
 #define _FDI_RXA_TUSIZE2         0xf0038
 #define _FDI_RXB_TUSIZE1         0xf1030
 #define _FDI_RXB_TUSIZE2         0xf1038
-#define  FDI_RX_TP1_TO_TP2_48  (2<<20)
-#define  FDI_RX_TP1_TO_TP2_64  (3<<20)
-#define  FDI_RX_FDI_DELAY_90   (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
 #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
 #define  PANEL_LIGHT_ON_DELAY_SHIFT    0
 
 #define PCH_PP_OFF_DELAYS      0xc720c
+#define  PANEL_POWER_PORT_SELECT_MASK  (0x3 << 30)
+#define  PANEL_POWER_PORT_LVDS         (0 << 30)
+#define  PANEL_POWER_PORT_DP_A         (1 << 30)
+#define  PANEL_POWER_PORT_DP_C         (2 << 30)
+#define  PANEL_POWER_PORT_DP_D         (3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK   (0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT  16
 #define  PANEL_LIGHT_OFF_DELAY_MASK    (0x1fff)
 #define TRANS_DP_CTL_A         0xe0300
 #define TRANS_DP_CTL_B         0xe1300
 #define TRANS_DP_CTL_C         0xe2300
-#define TRANS_DP_CTL(pipe)     (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe)     _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
 #define  TRANS_DP_OUTPUT_ENABLE        (1<<31)
 #define  TRANS_DP_PORT_SEL_B   (0<<29)
 #define  TRANS_DP_PORT_SEL_C   (1<<29)
 #define  FORCEWAKE_ACK_HSW                     0x130044
 #define  FORCEWAKE_ACK                         0x130090
 #define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define   FORCEWAKE_KERNEL                     0x1
+#define   FORCEWAKE_USER                       0x2
 #define  FORCEWAKE_MT_ACK                      0x130040
 #define  ECOBUS                                        0xa180
 #define    FORCEWAKE_MT_ENABLE                 (1<<5)
 #define   GEN6_READ_OC_PARAMS                  0xc
 #define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE      0x8
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE       0x9
+#define          GEN6_PCODE_WRITE_RC6VIDS              0x4
+#define          GEN6_PCODE_READ_RC6VIDS               0x5
+#define   GEN6_ENCODE_RC6_VID(mv)              (((mv) / 5) - 245) < 0 ?: 0
+#define   GEN6_DECODE_RC6_VID(vids)            (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
 #define GEN6_PCODE_DATA                                0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT       8
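
A note on the RC6 VID helpers above: the decode maps a VID code to
millivolts as vids * 5 + 245, so the inverse should subtract before
dividing, i.e. (mv - 245) / 5. The encode macro as merged,
(((mv) / 5) - 245) < 0 ?: 0, divides first and then degenerates into a
comparison, which does not invert the decode. The intended arithmetic,
sketched as a hypothetical helper (name and clamping are illustrative):

    static inline u32 rc6_vid_from_mv(int mv)
    {
            /* inverse of GEN6_DECODE_RC6_VID: mv = vid * 5 + 245 */
            return mv < 245 ? 0 : (mv - 245) / 5;
    }
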
 
 #define GEN7_L3LOG_BASE                        0xB070
 #define GEN7_L3LOG_SIZE                        0x80
 
+#define GEN7_HALF_SLICE_CHICKEN1       0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2   0xf100
+#define   GEN7_MAX_PS_THREAD_DEP               (8<<12)
+#define   GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2              0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2          0xf4f4
+#define   DOP_CLOCK_GATING_DISABLE     (1<<0)
+
 #define G4X_AUD_VID_DID                        0x62020
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
 #define HSW_PWR_WELL_CTL6                      0x45414
 
 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A            0x60400
-#define PIPE_DDI_FUNC_CTL_B            0x61400
-#define PIPE_DDI_FUNC_CTL_C            0x62400
-#define PIPE_DDI_FUNC_CTL_EDP          0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
-                                      PIPE_DDI_FUNC_CTL_B)
-#define  PIPE_DDI_FUNC_ENABLE          (1<<31)
+#define TRANS_DDI_FUNC_CTL_A           0x60400
+#define TRANS_DDI_FUNC_CTL_B           0x61400
+#define TRANS_DDI_FUNC_CTL_C           0x62400
+#define TRANS_DDI_FUNC_CTL_EDP         0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+                                                  TRANS_DDI_FUNC_CTL_B)
+#define  TRANS_DDI_FUNC_ENABLE         (1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define  PIPE_DDI_PORT_MASK            (7<<28)
-#define  PIPE_DDI_SELECT_PORT(x)       ((x)<<28)
-#define  PIPE_DDI_MODE_SELECT_MASK     (7<<24)
-#define  PIPE_DDI_MODE_SELECT_HDMI     (0<<24)
-#define  PIPE_DDI_MODE_SELECT_DVI      (1<<24)
-#define  PIPE_DDI_MODE_SELECT_DP_SST   (2<<24)
-#define  PIPE_DDI_MODE_SELECT_DP_MST   (3<<24)
-#define  PIPE_DDI_MODE_SELECT_FDI      (4<<24)
-#define  PIPE_DDI_BPC_MASK             (7<<20)
-#define  PIPE_DDI_BPC_8                        (0<<20)
-#define  PIPE_DDI_BPC_10               (1<<20)
-#define  PIPE_DDI_BPC_6                        (2<<20)
-#define  PIPE_DDI_BPC_12               (3<<20)
-#define  PIPE_DDI_PVSYNC               (1<<17)
-#define  PIPE_DDI_PHSYNC               (1<<16)
-#define  PIPE_DDI_BFI_ENABLE           (1<<4)
-#define  PIPE_DDI_PORT_WIDTH_X1                (0<<1)
-#define  PIPE_DDI_PORT_WIDTH_X2                (1<<1)
-#define  PIPE_DDI_PORT_WIDTH_X4                (3<<1)
+#define  TRANS_DDI_PORT_MASK           (7<<28)
+#define  TRANS_DDI_SELECT_PORT(x)      ((x)<<28)
+#define  TRANS_DDI_PORT_NONE           (0<<28)
+#define  TRANS_DDI_MODE_SELECT_MASK    (7<<24)
+#define  TRANS_DDI_MODE_SELECT_HDMI    (0<<24)
+#define  TRANS_DDI_MODE_SELECT_DVI     (1<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_SST  (2<<24)
+#define  TRANS_DDI_MODE_SELECT_DP_MST  (3<<24)
+#define  TRANS_DDI_MODE_SELECT_FDI     (4<<24)
+#define  TRANS_DDI_BPC_MASK            (7<<20)
+#define  TRANS_DDI_BPC_8               (0<<20)
+#define  TRANS_DDI_BPC_10              (1<<20)
+#define  TRANS_DDI_BPC_6               (2<<20)
+#define  TRANS_DDI_BPC_12              (3<<20)
+#define  TRANS_DDI_PVSYNC              (1<<17)
+#define  TRANS_DDI_PHSYNC              (1<<16)
+#define  TRANS_DDI_EDP_INPUT_MASK      (7<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ON      (0<<12)
+#define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4<<12)
+#define  TRANS_DDI_EDP_INPUT_B_ONOFF   (5<<12)
+#define  TRANS_DDI_EDP_INPUT_C_ONOFF   (6<<12)
+#define  TRANS_DDI_BFI_ENABLE          (1<<4)
+#define  TRANS_DDI_PORT_WIDTH_X1       (0<<1)
+#define  TRANS_DDI_PORT_WIDTH_X2       (1<<1)
+#define  TRANS_DDI_PORT_WIDTH_X4       (3<<1)
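
The rename above from PIPE_DDI_* to TRANS_DDI_* tracks the hardware: on
Haswell the DDI function control lives per transcoder (including the
dedicated eDP transcoder), not per pipe. A hedged sketch of programming
it for a DP stream (port and cpu_transcoder are illustrative local
state, not values from this patch):

    u32 val = TRANS_DDI_FUNC_ENABLE | TRANS_DDI_SELECT_PORT(port) |
              TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
              TRANS_DDI_PORT_WIDTH_X4;
    I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
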
 
 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A                    0x64040
 #define  DP_TP_CTL_LINK_TRAIN_MASK             (7<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT1             (0<<8)
 #define  DP_TP_CTL_LINK_TRAIN_PAT2             (1<<8)
+#define  DP_TP_CTL_LINK_TRAIN_PAT3             (4<<8)
+#define  DP_TP_CTL_LINK_TRAIN_IDLE             (2<<8)
 #define  DP_TP_CTL_LINK_TRAIN_NORMAL           (3<<8)
+#define  DP_TP_CTL_SCRAMBLE_DISABLE            (1<<7)
 
 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A                 0x64044
 #define DP_TP_STATUS_B                 0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define  DP_TP_STATUS_IDLE_DONE                (1<<25)
 #define  DP_TP_STATUS_AUTOTRAIN_DONE   (1<<12)
 
 /* DDI Buffer Control */
 #define  DDI_BUF_EMP_800MV_3_5DB_HSW           (8<<24)   /* Sel8 */
 #define  DDI_BUF_EMP_MASK                      (0xf<<24)
 #define  DDI_BUF_IS_IDLE                       (1<<7)
+#define  DDI_A_4_LANES                         (1<<4)
 #define  DDI_PORT_WIDTH_X1                     (0<<1)
 #define  DDI_PORT_WIDTH_X2                     (1<<1)
 #define  DDI_PORT_WIDTH_X4                     (3<<1)
 #define SBI_ADDR                       0xC6000
 #define SBI_DATA                       0xC6004
 #define SBI_CTL_STAT                   0xC6008
+#define  SBI_CTL_DEST_ICLK             (0x0<<16)
+#define  SBI_CTL_DEST_MPHY             (0x1<<16)
+#define  SBI_CTL_OP_IORD               (0x2<<8)
+#define  SBI_CTL_OP_IOWR               (0x3<<8)
 #define  SBI_CTL_OP_CRRD               (0x6<<8)
 #define  SBI_CTL_OP_CRWR               (0x7<<8)
 #define  SBI_RESPONSE_FAIL             (0x1<<1)
 #define   SBI_SSCDIVINTPHASE_PROPAGATE         (1<<0)
 #define  SBI_SSCCTL                            0x020c
 #define  SBI_SSCCTL6                           0x060C
+#define   SBI_SSCCTL_PATHALT                   (1<<3)
 #define   SBI_SSCCTL_DISABLE                   (1<<0)
 #define  SBI_SSCAUXDIV6                                0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x)<<4)
 #define  SBI_DBUFF0                            0x2a00
+#define   SBI_DBUFF0_ENABLE                    (1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE                    0xC6020
 /* SPLL */
 #define SPLL_CTL                       0x46020
 #define  SPLL_PLL_ENABLE               (1<<31)
-#define  SPLL_PLL_SCC                  (1<<28)
-#define  SPLL_PLL_NON_SCC              (2<<28)
+#define  SPLL_PLL_SSC                  (1<<28)
+#define  SPLL_PLL_NON_SSC              (2<<28)
 #define  SPLL_PLL_FREQ_810MHz          (0<<26)
 #define  SPLL_PLL_FREQ_1350MHz         (1<<26)
 
 #define WRPLL_CTL2                     0x46060
 #define  WRPLL_PLL_ENABLE              (1<<31)
 #define  WRPLL_PLL_SELECT_SSC          (0x01<<28)
-#define  WRPLL_PLL_SELECT_NON_SCC      (0x02<<28)
+#define  WRPLL_PLL_SELECT_NON_SSC      (0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700   (0x03<<28)
 /* WRPLL divider programming */
 #define  WRPLL_DIVIDER_REFERENCE(x)    ((x)<<0)
 #define  PORT_CLK_SEL_SPLL             (3<<29)
 #define  PORT_CLK_SEL_WRPLL1           (4<<29)
 #define  PORT_CLK_SEL_WRPLL2           (5<<29)
-
-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A                 0x46140
-#define PIPE_CLK_SEL_B                 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define  PIPE_CLK_SEL_DISABLED         (0x0<<29)
-#define  PIPE_CLK_SEL_PORT(x)          ((x+1)<<29)
+#define  PORT_CLK_SEL_NONE             (7<<29)
+
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A                        0x46140
+#define TRANS_CLK_SEL_B                        0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
+#define  TRANS_CLK_SEL_PORT(x)         ((x+1)<<29)
+
+#define _TRANSA_MSA_MISC               0x60410
+#define _TRANSB_MSA_MISC               0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+                                              _TRANSB_MSA_MISC)
+#define  TRANS_MSA_SYNC_CLK            (1<<0)
+#define  TRANS_MSA_6_BPC               (0<<5)
+#define  TRANS_MSA_8_BPC               (1<<5)
+#define  TRANS_MSA_10_BPC              (2<<5)
+#define  TRANS_MSA_12_BPC              (3<<5)
+#define  TRANS_MSA_16_BPC              (4<<5)
 
 /* LCPLL Control */
 #define LCPLL_CTL                      0x130040
 #define  LCPLL_PLL_DISABLE             (1<<31)
 #define  LCPLL_PLL_LOCK                        (1<<30)
+#define  LCPLL_CLK_FREQ_MASK           (3<<26)
+#define  LCPLL_CLK_FREQ_450            (0<<26)
 #define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
+#define  LCPLL_CD_SOURCE_FCLK          (1<<21)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A             0x45270
index 5854bdd..63d4d30 100644
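
The suspend/resume hunks below are mechanical: every save* register
field moves from living directly on dev_priv into a dev_priv->regfile
substruct, so all register state saved across suspend sits behind one
name. The shape of the change, sketched (the struct name and any fields
beyond those visible in the hunks are assumptions):

    /* grouped snapshot of registers saved over suspend/resume */
    struct i915_suspend_saved_registers {
            u8  saveMSR;
            u32 save_palette_a[256];
            u32 save_palette_b[256];
            /* ... every other save* field ... */
    };

    /* embedded in drm_i915_private as:
     *      struct i915_suspend_saved_registers regfile;
     */
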
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
                reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
        if (pipe == PIPE_A)
-               array = dev_priv->save_palette_a;
+               array = dev_priv->regfile.save_palette_a;
        else
-               array = dev_priv->save_palette_b;
+               array = dev_priv->regfile.save_palette_b;
 
        for (i = 0; i < 256; i++)
                array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
                reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
 
        if (pipe == PIPE_A)
-               array = dev_priv->save_palette_a;
+               array = dev_priv->regfile.save_palette_a;
        else
-               array = dev_priv->save_palette_b;
+               array = dev_priv->regfile.save_palette_b;
 
        for (i = 0; i < 256; i++)
                I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
        u16 cr_index, cr_data, st01;
 
        /* VGA color palette registers */
-       dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+       dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
 
        /* MSR bits */
-       dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
-       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+       dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
+       if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
                cr_index = VGA_CR_INDEX_CGA;
                cr_data = VGA_CR_DATA_CGA;
                st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
                           i915_read_indexed(dev, cr_index, cr_data, 0x11) &
                           (~0x80));
        for (i = 0; i <= 0x24; i++)
-               dev_priv->saveCR[i] =
+               dev_priv->regfile.saveCR[i] =
                        i915_read_indexed(dev, cr_index, cr_data, i);
        /* Make sure we don't turn off CR group 0 writes */
-       dev_priv->saveCR[0x11] &= ~0x80;
+       dev_priv->regfile.saveCR[0x11] &= ~0x80;
 
        /* Attribute controller registers */
        I915_READ8(st01);
-       dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+       dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
        for (i = 0; i <= 0x14; i++)
-               dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+               dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
        I915_READ8(st01);
-       I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+       I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
        I915_READ8(st01);
 
        /* Graphics controller registers */
        for (i = 0; i < 9; i++)
-               dev_priv->saveGR[i] =
+               dev_priv->regfile.saveGR[i] =
                        i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
 
-       dev_priv->saveGR[0x10] =
+       dev_priv->regfile.saveGR[0x10] =
                i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-       dev_priv->saveGR[0x11] =
+       dev_priv->regfile.saveGR[0x11] =
                i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-       dev_priv->saveGR[0x18] =
+       dev_priv->regfile.saveGR[0x18] =
                i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
 
        /* Sequencer registers */
        for (i = 0; i < 8; i++)
-               dev_priv->saveSR[i] =
+               dev_priv->regfile.saveSR[i] =
                        i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
 }
 
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
        u16 cr_index, cr_data, st01;
 
        /* MSR bits */
-       I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
-       if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+       I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
+       if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
                cr_index = VGA_CR_INDEX_CGA;
                cr_data = VGA_CR_DATA_CGA;
                st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
        /* Sequencer registers, don't write SR07 */
        for (i = 0; i < 7; i++)
                i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
-                                  dev_priv->saveSR[i]);
+                                  dev_priv->regfile.saveSR[i]);
 
        /* CRT controller regs */
        /* Enable CR group 0 writes */
-       i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+       i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
        for (i = 0; i <= 0x24; i++)
-               i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+               i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
 
        /* Graphics controller regs */
        for (i = 0; i < 9; i++)
                i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
-                                  dev_priv->saveGR[i]);
+                                  dev_priv->regfile.saveGR[i]);
 
        i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-                          dev_priv->saveGR[0x10]);
+                          dev_priv->regfile.saveGR[0x10]);
        i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-                          dev_priv->saveGR[0x11]);
+                          dev_priv->regfile.saveGR[0x11]);
        i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-                          dev_priv->saveGR[0x18]);
+                          dev_priv->regfile.saveGR[0x18]);
 
        /* Attribute controller registers */
        I915_READ8(st01); /* switch back to index mode */
        for (i = 0; i <= 0x14; i++)
-               i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+               i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
        I915_READ8(st01); /* switch back to index mode */
-       I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+       I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
        I915_READ8(st01);
 
        /* VGA color palette registers */
-       I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+       I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
 }
 
 static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                return;
 
        /* Cursor state */
-       dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
-       dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
-       dev_priv->saveCURABASE = I915_READ(_CURABASE);
-       dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
-       dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
-       dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
+       dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
+       dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
+       dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
+       dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
+       dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
+       dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
        if (IS_GEN2(dev))
-               dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+               dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
 
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
-               dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+               dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+               dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
        }
 
        /* Pipe & plane A info */
-       dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
-       dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
+       dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
+       dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
-               dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
-               dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
+               dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
+               dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
+               dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
        } else {
-               dev_priv->saveFPA0 = I915_READ(_FPA0);
-               dev_priv->saveFPA1 = I915_READ(_FPA1);
-               dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
+               dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
+               dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
+               dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
        }
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-               dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
-       dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
-       dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
-       dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
-       dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
-       dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
-       dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
+               dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+       dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
+       dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
+       dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
+       dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
+       dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
+       dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
        if (!HAS_PCH_SPLIT(dev))
-               dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
+               dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
 
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
-               dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
-               dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
-               dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
-               dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
-               dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
-               dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
-               dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
-               dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
-               dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
-               dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
-               dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
-               dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
-               dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
-               dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
-               dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
-       }
-
-       dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
-       dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
-       dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
-       dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
-       dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
+               dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+               dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+               dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+               dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+               dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+               dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+               dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+               dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+               dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+               dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
+               dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+               dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+               dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+               dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+               dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+               dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+       }
+
+       dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
+       dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+       dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
+       dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
+       dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
        if (INTEL_INFO(dev)->gen >= 4) {
-               dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
-               dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
+               dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
+               dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
        }
        i915_save_palette(dev, PIPE_A);
-       dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
+       dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
 
        /* Pipe & plane B info */
-       dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
-       dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
+       dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
+       dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
-               dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
-               dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
+               dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
+               dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
+               dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
        } else {
-               dev_priv->saveFPB0 = I915_READ(_FPB0);
-               dev_priv->saveFPB1 = I915_READ(_FPB1);
-               dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
+               dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
+               dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
+               dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
        }
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-               dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
-       dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
-       dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
-       dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
-       dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
-       dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
-       dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
+               dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+       dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
+       dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
+       dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
+       dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
+       dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
+       dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
        if (!HAS_PCH_SPLIT(dev))
-               dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
+               dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
 
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
-               dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
-               dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
-               dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
-               dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
-               dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
-               dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
-               dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
-               dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
-               dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
-               dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
-               dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
-               dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
-               dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
-               dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
-               dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
-       }
-
-       dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
-       dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
-       dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
-       dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
-       dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
+               dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+               dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+               dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+               dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+               dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+               dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+               dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+               dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+               dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+               dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
+               dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+               dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+               dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+               dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+               dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+               dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+       }
+
+       dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
+       dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+       dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
+       dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
+       dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
        if (INTEL_INFO(dev)->gen >= 4) {
-               dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
-               dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
+               dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
+               dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
        }
        i915_save_palette(dev, PIPE_B);
-       dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
+       dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
 
        /* Fences */
        switch (INTEL_INFO(dev)->gen) {
        case 7:
        case 6:
                for (i = 0; i < 16; i++)
-                       dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+                       dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
                break;
        case 5:
        case 4:
                for (i = 0; i < 16; i++)
-                       dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+                       dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
                break;
        case 3:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
-                               dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+                               dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
        case 2:
                for (i = 0; i < 8; i++)
-                       dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+                       dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
                break;
        }
 
+       /* CRT state */
+       if (HAS_PCH_SPLIT(dev))
+               dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
+       else
+               dev_priv->regfile.saveADPA = I915_READ(ADPA);
+
        return;
 }
 
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        case 7:
        case 6:
                for (i = 0; i < 16; i++)
-                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+                       I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
                break;
        case 5:
        case 4:
                for (i = 0; i < 16; i++)
-                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+                       I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
                break;
        case 3:
        case 2:
                if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
                        for (i = 0; i < 8; i++)
-                               I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+                               I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
                for (i = 0; i < 8; i++)
-                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+                       I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
                break;
        }
 
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        }
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
-               I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+               I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
+               I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
        }
 
        /* Pipe & plane A info */
        /* Prime the clock */
-       if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-               I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
+       if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
+               I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
                           ~DPLL_VCO_ENABLE);
                POSTING_READ(dpll_a_reg);
                udelay(150);
        }
-       I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
-       I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
+       I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
+       I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
        /* Actually enable it */
-       I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+       I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
        POSTING_READ(dpll_a_reg);
        udelay(150);
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+               I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
                POSTING_READ(_DPLL_A_MD);
        }
        udelay(150);
 
        /* Restore mode */
-       I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
-       I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
-       I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
-       I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
-       I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
-       I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
+       I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
+       I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
+       I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
+       I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
+       I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
+       I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
        if (!HAS_PCH_SPLIT(dev))
-               I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+               I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
-               I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
-               I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
-               I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+               I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
+               I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
+               I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
+               I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
 
-               I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
-               I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+               I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
+               I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
 
-               I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
-               I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
-               I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+               I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
+               I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
+               I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
 
-               I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
-               I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
-               I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
-               I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
-               I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
-               I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
-               I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+               I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
+               I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
+               I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
+               I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
+               I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
+               I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
+               I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
        }
 
        /* Restore plane info */
-       I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
-       I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
-       I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
-       I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
-       I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+       I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
+       I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
+       I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
+       I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
+       I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
        if (INTEL_INFO(dev)->gen >= 4) {
-               I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
-               I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+               I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
+               I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
        }
 
-       I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
+       I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
 
        i915_restore_palette(dev, PIPE_A);
        /* Enable the plane */
-       I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+       I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
        I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
 
        /* Pipe & plane B info */
-       if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-               I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
+       if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
+               I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
                           ~DPLL_VCO_ENABLE);
                POSTING_READ(dpll_b_reg);
                udelay(150);
        }
-       I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
-       I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
+       I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
+       I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
        /* Actually enable it */
-       I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+       I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
        POSTING_READ(dpll_b_reg);
        udelay(150);
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+               I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
                POSTING_READ(_DPLL_B_MD);
        }
        udelay(150);
 
        /* Restore mode */
-       I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
-       I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
-       I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
-       I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
-       I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
-       I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
+       I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
+       I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
+       I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
+       I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
+       I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
+       I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
        if (!HAS_PCH_SPLIT(dev))
-               I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+               I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
-               I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
-               I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
-               I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+               I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
+               I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
+               I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
+               I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
 
-               I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
-               I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+               I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
+               I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
 
-               I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
-               I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
-               I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+               I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
+               I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
+               I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
 
-               I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
-               I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
-               I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
-               I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
-               I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
-               I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
-               I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+               I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
+               I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
+               I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
+               I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
+               I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
+               I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
+               I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
        }
 
        /* Restore plane info */
-       I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
-       I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
-       I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
-       I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
-       I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+       I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
+       I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
+       I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
+       I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
+       I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
        if (INTEL_INFO(dev)->gen >= 4) {
-               I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
-               I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+               I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
+               I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
        }
 
-       I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
+       I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
 
        i915_restore_palette(dev, PIPE_B);
        /* Enable the plane */
-       I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+       I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
        I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
 
        /* Cursor state */
-       I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
-       I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
-       I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
-       I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
-       I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
-       I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
+       I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
+       I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
+       I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
+       I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
+       I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
+       I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
        if (IS_GEN2(dev))
-               I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+               I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
+
+       /* CRT state */
+       if (HAS_PCH_SPLIT(dev))
+               I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
+       else
+               I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
 
        return;
 }
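
The two hunks above pair the CRT save with the matching restore: the same HAS_PCH_SPLIT() test must select the ADPA instance on both sides, otherwise a suspend on PCH hardware would restore the analog port state into the wrong register. A minimal sketch of that pairing, with crt_adpa_reg() as a hypothetical helper (the patch itself open-codes the check):

        static u32 crt_adpa_reg(struct drm_device *dev)
        {
                /* PCH platforms expose the CRT DAC control in the south display block */
                return HAS_PCH_SPLIT(dev) ? PCH_ADPA : ADPA;
        }

        static void crt_state_save(struct drm_device *dev)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;

                dev_priv->regfile.saveADPA = I915_READ(crt_adpa_reg(dev));
        }

        static void crt_state_restore(struct drm_device *dev)
        {
                struct drm_i915_private *dev_priv = dev->dev_private;

                I915_WRITE(crt_adpa_reg(dev), dev_priv->regfile.saveADPA);
        }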
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* Display arbitration control */
-       dev_priv->saveDSPARB = I915_READ(DSPARB);
+       dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
 
        /* This is only meaningful in non-KMS mode */
        /* Don't save them in KMS mode */
        i915_save_modeset_reg(dev);
 
-       /* CRT state */
-       if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->saveADPA = I915_READ(PCH_ADPA);
-       } else {
-               dev_priv->saveADPA = I915_READ(ADPA);
-       }
-
        /* LVDS state */
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
-               dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
-               dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
-               dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
-               dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
-               dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+               dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+               dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+               dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+               dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+               dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
        } else {
-               dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-               dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-               dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-               dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+               dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
+               dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+               dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+               dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
                if (INTEL_INFO(dev)->gen >= 4)
-                       dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+                       dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
                if (IS_MOBILE(dev) && !IS_I830(dev))
-                       dev_priv->saveLVDS = I915_READ(LVDS);
+                       dev_priv->regfile.saveLVDS = I915_READ(LVDS);
        }
 
        if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-               dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+               dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
        if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
-               dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
-               dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+               dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+               dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+               dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
        } else {
-               dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
-               dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
-               dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
-       }
-
-       /* Display Port state */
-       if (SUPPORTS_INTEGRATED_DP(dev)) {
-               dev_priv->saveDP_B = I915_READ(DP_B);
-               dev_priv->saveDP_C = I915_READ(DP_C);
-               dev_priv->saveDP_D = I915_READ(DP_D);
-               dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
-               dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
-               dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
-               dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
-               dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
-               dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
-               dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
-               dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
-       }
-       /* FIXME: save TV & SDVO state */
-
-       /* Only save FBC state on the platform that supports FBC */
+               dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+               dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+               dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Display Port state */
+               if (SUPPORTS_INTEGRATED_DP(dev)) {
+                       dev_priv->regfile.saveDP_B = I915_READ(DP_B);
+                       dev_priv->regfile.saveDP_C = I915_READ(DP_C);
+                       dev_priv->regfile.saveDP_D = I915_READ(DP_D);
+                       dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+                       dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+                       dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+                       dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+                       dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+                       dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+                       dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+                       dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
+               }
+               /* FIXME: save TV & SDVO state */
+       }
+
+       /* Only save FBC state on the platform that supports FBC */
        if (I915_HAS_FBC(dev)) {
                if (HAS_PCH_SPLIT(dev)) {
-                       dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+                       dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
                } else if (IS_GM45(dev)) {
-                       dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+                       dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
                } else {
-                       dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-                       dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-                       dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-                       dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+                       dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+                       dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+                       dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+                       dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
                }
        }
 
        /* VGA state */
-       dev_priv->saveVGA0 = I915_READ(VGA0);
-       dev_priv->saveVGA1 = I915_READ(VGA1);
-       dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+       dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+       dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+       dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
        if (HAS_PCH_SPLIT(dev))
-               dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+               dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
        else
-               dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+               dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
 
        i915_save_vga(dev);
 }
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* Display arbitration */
-       I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
-       /* Display port ratios (must be done before clock is set) */
-       if (SUPPORTS_INTEGRATED_DP(dev)) {
-               I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
-               I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
-               I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
-               I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
-               I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
-               I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
-               I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
-               I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+       I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Display port ratios (must be done before clock is set) */
+               if (SUPPORTS_INTEGRATED_DP(dev)) {
+                       I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
+                       I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
+                       I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
+                       I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
+                       I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
+                       I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
+                       I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
+                       I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
+               }
        }
 
        /* This is only meaningful in non-KMS mode */
        /* Don't restore them in KMS mode */
        i915_restore_modeset_reg(dev);
 
-       /* CRT state */
-       if (HAS_PCH_SPLIT(dev))
-               I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
-       else
-               I915_WRITE(ADPA, dev_priv->saveADPA);
-
        /* LVDS state */
        if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-               I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+               I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
        } else if (IS_MOBILE(dev) && !IS_I830(dev))
-               I915_WRITE(LVDS, dev_priv->saveLVDS);
+               I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
 
        if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-               I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+               I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 
        if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
-               I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+               I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
+               I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
                /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
                 * otherwise we get blank eDP screen after S3 on some machines
                 */
-               I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
-               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
-               I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-               I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-               I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
-               I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+               I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
+               I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
+               I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+               I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+               I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+               I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
                I915_WRITE(RSTDBYCTL,
-                          dev_priv->saveMCHBAR_RENDER_STANDBY);
+                          dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
        } else {
-               I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-               I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-               I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
-               I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
-               I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
-               I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
-               I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-       }
-
-       /* Display Port state */
-       if (SUPPORTS_INTEGRATED_DP(dev)) {
-               I915_WRITE(DP_B, dev_priv->saveDP_B);
-               I915_WRITE(DP_C, dev_priv->saveDP_C);
-               I915_WRITE(DP_D, dev_priv->saveDP_D);
+               I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+               I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+               I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+               I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
+               I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
+               I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
+               I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
+       }
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Display Port state */
+               if (SUPPORTS_INTEGRATED_DP(dev)) {
+                       I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
+                       I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
+                       I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
+               }
+               /* FIXME: restore TV & SDVO state */
        }
-       /* FIXME: restore TV & SDVO state */
 
        /* Only restore FBC state on the platform that supports FBC */
        intel_disable_fbc(dev);
        if (I915_HAS_FBC(dev)) {
                if (HAS_PCH_SPLIT(dev)) {
-                       I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+                       I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
                } else if (IS_GM45(dev)) {
-                       I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+                       I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
                } else {
-                       I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-                       I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-                       I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-                       I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+                       I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
+                       I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
+                       I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
+                       I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
                }
        }
        /* VGA state */
        if (HAS_PCH_SPLIT(dev))
-               I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+               I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
        else
-               I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+               I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
 
-       I915_WRITE(VGA0, dev_priv->saveVGA0);
-       I915_WRITE(VGA1, dev_priv->saveVGA1);
-       I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+       I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+       I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+       I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
        POSTING_READ(VGA_PD);
        udelay(150);
 
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+       pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
 
        mutex_lock(&dev->struct_mutex);
 
-       /* Hardware status page */
-       dev_priv->saveHWS = I915_READ(HWS_PGA);
-
        i915_save_display(dev);
 
-       /* Interrupt state */
-       if (HAS_PCH_SPLIT(dev)) {
-               dev_priv->saveDEIER = I915_READ(DEIER);
-               dev_priv->saveDEIMR = I915_READ(DEIMR);
-               dev_priv->saveGTIER = I915_READ(GTIER);
-               dev_priv->saveGTIMR = I915_READ(GTIMR);
-               dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
-               dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
-               dev_priv->saveMCHBAR_RENDER_STANDBY =
-                       I915_READ(RSTDBYCTL);
-               dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
-       } else {
-               dev_priv->saveIER = I915_READ(IER);
-               dev_priv->saveIMR = I915_READ(IMR);
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Interrupt state */
+               if (HAS_PCH_SPLIT(dev)) {
+                       dev_priv->regfile.saveDEIER = I915_READ(DEIER);
+                       dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
+                       dev_priv->regfile.saveGTIER = I915_READ(GTIER);
+                       dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
+                       dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+                       dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
+                       dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
+                               I915_READ(RSTDBYCTL);
+                       dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
+               } else {
+                       dev_priv->regfile.saveIER = I915_READ(IER);
+                       dev_priv->regfile.saveIMR = I915_READ(IMR);
+               }
        }
 
        intel_disable_gt_powersave(dev);
 
        /* Cache mode state */
-       dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+       dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
        /* Memory Arbitration state */
-       dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+       dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
 
        /* Scratch space */
        for (i = 0; i < 16; i++) {
-               dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
-               dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+               dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+               dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
        }
        for (i = 0; i < 3; i++)
-               dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+               dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+       pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
 
        mutex_lock(&dev->struct_mutex);
 
-       /* Hardware status page */
-       I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
        i915_restore_display(dev);
 
-       /* Interrupt state */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(DEIER, dev_priv->saveDEIER);
-               I915_WRITE(DEIMR, dev_priv->saveDEIMR);
-               I915_WRITE(GTIER, dev_priv->saveGTIER);
-               I915_WRITE(GTIMR, dev_priv->saveGTIMR);
-               I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
-               I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
-               I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG);
-       } else {
-               I915_WRITE(IER, dev_priv->saveIER);
-               I915_WRITE(IMR, dev_priv->saveIMR);
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /* Interrupt state */
+               if (HAS_PCH_SPLIT(dev)) {
+                       I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
+                       I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
+                       I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
+                       I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
+                       I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
+                       I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
+                       I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+               } else {
+                       I915_WRITE(IER, dev_priv->regfile.saveIER);
+                       I915_WRITE(IMR, dev_priv->regfile.saveIMR);
+               }
        }
 
        /* Cache mode state */
-       I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+       I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
 
        /* Memory arbitration state */
-       I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+       I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
 
        for (i = 0; i < 16; i++) {
-               I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
-               I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+               I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
+               I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
        }
        for (i = 0; i < 3; i++)
-               I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+               I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
 
        mutex_unlock(&dev->struct_mutex);
 
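The theme of the hunks above: legacy (UMS) register handling is fenced off so that it only runs when KMS is not driving the hardware, since under DRIVER_MODESET the modeset code reprograms interrupts and Display Port state itself on resume. A sketch of the gate, assuming only the standard DRIVER_MODESET feature-flag semantics:

        /* hypothetical wrapper; the hunks above open-code this test at each site */
        static bool i915_ums_active(struct drm_device *dev)
        {
                return !drm_core_check_feature(dev, DRIVER_MODESET);
        }

        ...
        if (i915_ums_active(dev)) {
                /* interrupt masks, DP ports, TV/SDVO: UMS-only state */
        }
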
index 903eebd..9462081 100644 (file)
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-       if (!IS_IVYBRIDGE(dev))
+       if (!HAS_L3_GPU_CACHE(dev))
                return -EPERM;
 
        if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        if (ret)
                return ret;
 
-       if (!dev_priv->mm.l3_remap_info) {
+       if (!dev_priv->l3_parity.remap_info) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
         * at this point it is left as a TODO.
        */
        if (temp)
-               dev_priv->mm.l3_remap_info = temp;
+               dev_priv->l3_parity.remap_info = temp;
 
-       memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+       memcpy(dev_priv->l3_parity.remap_info + (offset/4),
               buf + (offset/4),
               count);
 
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
+       mutex_lock(&dev_priv->rps.hw_lock);
        ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
+       mutex_lock(&dev_priv->rps.hw_lock);
        ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
        val /= GT_FREQUENCY_MULTIPLIER;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
+       mutex_lock(&dev_priv->rps.hw_lock);
 
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        hw_max = (rp_state_cap & 0xff);
        hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
        if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
-               mutex_unlock(&dev->struct_mutex);
+               mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
        dev_priv->rps.max_delay = val;
 
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return count;
 }
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ret = i915_mutex_lock_interruptible(dev);
-       if (ret)
-               return ret;
-
+       mutex_lock(&dev_priv->rps.hw_lock);
        ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return snprintf(buf, PAGE_SIZE, "%d", ret);
 }
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
        val /= GT_FREQUENCY_MULTIPLIER;
 
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
+       mutex_lock(&dev_priv->rps.hw_lock);
 
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        hw_max = (rp_state_cap & 0xff);
        hw_min = ((rp_state_cap & 0xff0000) >> 16);
 
        if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
-               mutex_unlock(&dev->struct_mutex);
+               mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }
 
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
        dev_priv->rps.min_delay = val;
 
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->rps.hw_lock);
 
        return count;
 
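The sysfs hunks above replace dev->struct_mutex with the dedicated dev_priv->rps.hw_lock. Two effects: frequency queries no longer contend with the GEM lock, and since the lock is taken non-interruptibly, a simple read can no longer bounce back to userspace with -EINTR. The read side boils down to the shape below (the helper name and the unit comment are assumptions, not part of the patch):

        static int rps_delay_to_mhz(struct drm_i915_private *dev_priv,
                                    const u8 *delay)
        {
                int mhz;

                mutex_lock(&dev_priv->rps.hw_lock);
                mhz = *delay * GT_FREQUENCY_MULTIPLIER; /* hw units -> MHz */
                mutex_unlock(&dev_priv->rps.hw_lock);

                return mhz;
        }

Callers would pass &dev_priv->rps.cur_delay, .max_delay or .min_delay.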
index 8134421..3db4a68 100644 (file)
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-           TP_ARGS(ring, seqno),
+           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+           TP_ARGS(ring, seqno, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, ring)
                             __field(u32, seqno)
+                            __field(u32, flags)
                             ),
 
            TP_fast_assign(
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
                           __entry->seqno = seqno;
+                          __entry->flags = flags;
                           i915_trace_irq_get(ring, seqno);
                           ),
 
-           TP_printk("dev=%u, ring=%u, seqno=%u",
-                     __entry->dev, __entry->ring, __entry->seqno)
+           TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+                     __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
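
With the widened TP_PROTO above, TRACE_EVENT() regenerates the trace_i915_gem_ring_dispatch() stub with a third argument, so dispatch call sites can record per-batch flags next to dev/ring/seqno. A hypothetical call-site sketch (the flag encoding is an assumption, not taken from this diff):

        static void dispatch_and_trace(struct intel_ring_buffer *ring,
                                       u32 seqno, bool secure)
        {
                u32 flags = secure ? (1 << 0) : 0;      /* assumed bit layout */

                trace_i915_gem_ring_dispatch(ring, seqno, flags);
        }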
index 56846ed..55ffba1 100644 (file)
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
         /* Set the Panel Power On/Off timings if uninitialized. */
-       if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+       if (!HAS_PCH_SPLIT(dev) &&
+           I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
                /* Set T2 to 40ms and T5 to 200ms */
                I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
index 6345878..9293878 100644 (file)
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
        if (mode->clock > max_clock)
                return MODE_CLOCK_HIGH;
 
+       /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+       if (HAS_PCH_LPT(dev) &&
+           (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+               return MODE_CLOCK_HIGH;
+
        return MODE_OK;
 }
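
The added LPT check is a bandwidth budget: each 270 MHz FDI lane moves 270000 * 8 kbit/s, so two lanes top out around 4.3 Gbit/s. A 148.5 MHz 1080p mode at 24 bpp needs about 3.74 Gbit/s including a spread-spectrum pad and fits; a 241.5 MHz mode needs a third lane and is rejected with MODE_CLOCK_HIGH. Back-of-the-envelope version of the helper (the exact margin inside ironlake_get_lanes_required() is assumed here to be about 5%):

        static int fdi_lanes_needed(unsigned int dotclock_khz,
                                    unsigned int link_khz, unsigned int bpp)
        {
                /* payload in kbit/s, padded ~5% for spread spectrum */
                unsigned long long kbps =
                        (unsigned long long)dotclock_khz * bpp * 21 / 20;
                unsigned int lane_kbps = link_khz * 8;  /* 8 data bits per link clock */

                return (int)((kbps + lane_kbps - 1) / lane_kbps);       /* round up */
        }

        /* fdi_lanes_needed(148500, 270000, 24) == 2,
         * fdi_lanes_needed(241500, 270000, 24) == 3 */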
 
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 adpa;
 
-       adpa = ADPA_HOTPLUG_BITS;
+       if (HAS_PCH_SPLIT(dev))
+               adpa = ADPA_HOTPLUG_BITS;
+       else
+               adpa = 0;
+
        if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                adpa |= ADPA_HSYNC_ACTIVE_HIGH;
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
        /* For CPT allow 3 pipe config, for others just use A or B */
-       if (HAS_PCH_CPT(dev))
+       if (HAS_PCH_LPT(dev))
+               ; /* Those bits don't exist here */
+       else if (HAS_PCH_CPT(dev))
                adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
        else if (intel_crtc->pipe == 0)
                adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
                                struct i2c_adapter *adapter)
 {
        struct edid *edid;
+       int ret;
 
        edid = intel_crt_get_edid(connector, adapter);
        if (!edid)
                return 0;
 
-       return intel_connector_update_modes(connector, edid);
+       ret = intel_connector_update_modes(connector, edid);
+       kfree(edid);
+
+       return ret;
 }
 
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
 static void intel_crt_reset(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crt *crt = intel_attached_crt(connector);
 
-       if (HAS_PCH_SPLIT(dev))
+       if (HAS_PCH_SPLIT(dev)) {
+               u32 adpa;
+
+               adpa = I915_READ(PCH_ADPA);
+               adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+               adpa |= ADPA_HOTPLUG_BITS;
+               I915_WRITE(PCH_ADPA, adpa);
+               POSTING_READ(PCH_ADPA);
+
+               DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
                crt->force_hotplug_required = 1;
+       }
 }
 
 /*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
 
        crt->base.type = INTEL_OUTPUT_ANALOG;
        crt->base.cloneable = true;
-       if (IS_HASWELL(dev) || IS_I830(dev))
+       if (IS_I830(dev))
                crt->base.crtc_mask = (1 << 0);
        else
                crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
 
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
-       crt->base.get_hw_state = intel_crt_get_hw_state;
+       if (IS_HASWELL(dev))
+               crt->base.get_hw_state = intel_ddi_get_hw_state;
+       else
+               crt->base.get_hw_state = intel_crt_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
 
        drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
         * Configure the automatic hotplug detection stuff
         */
        crt->force_hotplug_required = 0;
-       if (HAS_PCH_SPLIT(dev)) {
-               u32 adpa;
-
-               adpa = I915_READ(PCH_ADPA);
-               adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-               adpa |= ADPA_HOTPLUG_BITS;
-               I915_WRITE(PCH_ADPA, adpa);
-               POSTING_READ(PCH_ADPA);
-
-               DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
-               crt->force_hotplug_required = 1;
-       }
 
        dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+       /*
+        * TODO: find a proper way to discover whether we need to set the
+        * polarity reversal bit or not, instead of relying on the BIOS.
+        */
+       if (HAS_PCH_LPT(dev))
+               dev_priv->fdi_rx_polarity_reversed =
+                    !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
 }
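
Moving the ADPA hotplug programming from intel_crt_init() into intel_crt_reset() means the trigger configuration is reapplied on every connector reset (e.g. across suspend/resume) instead of once at load. The read-modify-write idiom it uses, as a standalone sketch (helper name is hypothetical):

        static void crt_arm_hotplug(struct drm_i915_private *dev_priv)
        {
                u32 adpa = I915_READ(PCH_ADPA);

                adpa &= ~ADPA_CRT_HOTPLUG_MASK; /* clear whatever the BIOS left behind */
                adpa |= ADPA_HOTPLUG_BITS;      /* driver-chosen trigger configuration */
                I915_WRITE(PCH_ADPA, adpa);
                POSTING_READ(PCH_ADPA);         /* flush before trusting detection */
        }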
index bfe3754..4bad0f7 100644 (file)
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
        0x00FFFFFF, 0x00040006          /* HDMI parameters */
 };
 
+static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       int type = intel_encoder->type;
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+           type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
+               struct intel_digital_port *intel_dig_port =
+                       enc_to_dig_port(encoder);
+               return intel_dig_port->port;
+
+       } else if (type == INTEL_OUTPUT_ANALOG) {
+               return PORT_E;
+
+       } else {
+               DRM_ERROR("Invalid DDI encoder type %d\n", type);
+               BUG();
+       }
+}
+
 /* On Haswell, DDI port buffers must be programmed with correct values
  * in advance. The buffer values are different for FDI and DP modes,
  * but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
        DDI_BUF_EMP_800MV_3_5DB_HSW
 };
 
+static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
+                                   enum port port)
+{
+       uint32_t reg = DDI_BUF_CTL(port);
+       int i;
+
+       for (i = 0; i < 8; i++) {
+               udelay(1);
+               if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+                       return;
+       }
+       DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
+}
 
 /* Starting with Haswell, different DDI ports can work in FDI mode for
  * connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       u32 reg, temp, i;
-
-       /* Configure CPU PLL, wait for warmup */
-       I915_WRITE(SPLL_CTL,
-                       SPLL_PLL_ENABLE |
-                       SPLL_PLL_FREQ_1350MHz |
-                       SPLL_PLL_SCC);
+       u32 temp, i, rx_ctl_val;
 
-       /* Use SPLL to drive the output when in FDI mode */
-       I915_WRITE(PORT_CLK_SEL(PORT_E),
-                       PORT_CLK_SEL_SPLL);
-       I915_WRITE(PIPE_CLK_SEL(pipe),
-                       PIPE_CLK_SEL_PORT(PORT_E));
-
-       udelay(20);
-
-       /* Start the training iterating through available voltages and emphasis */
-       for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
+       /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
+        * mode set "sequence for CRT port" document:
+        * - TP1 to TP2 time with the default value
+        * - FDI delay to 90h
+        */
+       I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
+                                 FDI_RX_PWRDN_LANE0_VAL(2) |
+                                 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
+       /* Enable the PCH Receiver FDI PLL */
+       rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
+                    ((intel_crtc->fdi_lanes - 1) << 19);
+       if (dev_priv->fdi_rx_polarity_reversed)
+               rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
+       I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+       POSTING_READ(_FDI_RXA_CTL);
+       udelay(220);
+
+       /* Switch from Rawclk to PCDclk */
+       rx_ctl_val |= FDI_PCDCLK;
+       I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+
+       /* Configure Port Clock Select */
+       I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+
+       /* Start the training iterating through available voltages and emphasis,
+        * testing each value twice. */
+       for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
                /* Configure DP_TP_CTL with auto-training */
                I915_WRITE(DP_TP_CTL(PORT_E),
                                        DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
                                        DP_TP_CTL_ENABLE);
 
                /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
-               temp = I915_READ(DDI_BUF_CTL(PORT_E));
-               temp = (temp & ~DDI_BUF_EMP_MASK);
                I915_WRITE(DDI_BUF_CTL(PORT_E),
-                               temp |
-                               DDI_BUF_CTL_ENABLE |
-                               DDI_PORT_WIDTH_X2 |
-                               hsw_ddi_buf_ctl_values[i]);
+                          DDI_BUF_CTL_ENABLE |
+                          ((intel_crtc->fdi_lanes - 1) << 1) |
+                          hsw_ddi_buf_ctl_values[i / 2]);
+               POSTING_READ(DDI_BUF_CTL(PORT_E));
 
                udelay(600);
 
-               /* We need to program FDI_RX_MISC with the default TP1 to TP2
-                * values before enabling the receiver, and configure the delay
-                * for the FDI timing generator to 90h. Luckily, all the other
-                * bits are supposed to be zeroed, so we can write those values
-                * directly.
-                */
-               I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
-                               FDI_RX_FDI_DELAY_90);
-
-               /* Enable CPU FDI Receiver with auto-training */
-               reg = FDI_RX_CTL(pipe);
-               I915_WRITE(reg,
-                               I915_READ(reg) |
-                                       FDI_LINK_TRAIN_AUTO |
-                                       FDI_RX_ENABLE |
-                                       FDI_LINK_TRAIN_PATTERN_1_CPT |
-                                       FDI_RX_ENHANCE_FRAME_ENABLE |
-                                       FDI_PORT_WIDTH_2X_LPT |
-                                       FDI_RX_PLL_ENABLE);
-               POSTING_READ(reg);
-               udelay(100);
+               /* Program PCH FDI Receiver TU */
+               I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
+
+               /* Enable PCH FDI Receiver with auto-training */
+               rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
+               I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+               POSTING_READ(_FDI_RXA_CTL);
+
+               /* Wait for FDI receiver lane calibration */
+               udelay(30);
+
+               /* Unset FDI_RX_MISC pwrdn lanes */
+               temp = I915_READ(_FDI_RXA_MISC);
+               temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+               I915_WRITE(_FDI_RXA_MISC, temp);
+               POSTING_READ(_FDI_RXA_MISC);
+
+               /* Wait for FDI auto training time */
+               udelay(5);
 
                temp = I915_READ(DP_TP_STATUS(PORT_E));
                if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
-                       DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
+                       DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
 
                        /* Enable normal pixel sending for FDI */
                        I915_WRITE(DP_TP_CTL(PORT_E),
-                                               DP_TP_CTL_FDI_AUTOTRAIN |
-                                               DP_TP_CTL_LINK_TRAIN_NORMAL |
-                                               DP_TP_CTL_ENHANCED_FRAME_ENABLE |
-                                               DP_TP_CTL_ENABLE);
-
-                       /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
-                       temp = I915_READ(DDI_FUNC_CTL(pipe));
-                       temp &= ~PIPE_DDI_PORT_MASK;
-                       temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
-                                       PIPE_DDI_MODE_SELECT_FDI |
-                                       PIPE_DDI_FUNC_ENABLE |
-                                       PIPE_DDI_PORT_WIDTH_X2;
-                       I915_WRITE(DDI_FUNC_CTL(pipe),
-                                       temp);
-                       break;
-               } else {
-                       DRM_ERROR("Error training BUF_CTL %d\n", i);
+                                  DP_TP_CTL_FDI_AUTOTRAIN |
+                                  DP_TP_CTL_LINK_TRAIN_NORMAL |
+                                  DP_TP_CTL_ENHANCED_FRAME_ENABLE |
+                                  DP_TP_CTL_ENABLE);
 
-                       /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
-                       I915_WRITE(DP_TP_CTL(PORT_E),
-                                       I915_READ(DP_TP_CTL(PORT_E)) &
-                                               ~DP_TP_CTL_ENABLE);
-                       I915_WRITE(FDI_RX_CTL(pipe),
-                                       I915_READ(FDI_RX_CTL(pipe)) &
-                                               ~FDI_RX_PLL_ENABLE);
-                       continue;
+                       return;
                }
-       }
 
-       DRM_DEBUG_KMS("FDI train done.\n");
-}
-
-/* For DDI connections, it is possible to support different outputs over the
- * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
- * the time the output is detected what exactly is on the other end of it. This
- * function aims at providing support for this detection and proper output
- * configuration.
- */
-void intel_ddi_init(struct drm_device *dev, enum port port)
-{
-       /* For now, we don't do any proper output detection and assume that we
-        * handle HDMI only */
-
-       switch(port){
-       case PORT_A:
-               /* We don't handle eDP and DP yet */
-               DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
-               break;
-       /* Assume that the  ports B, C and D are working in HDMI mode for now */
-       case PORT_B:
-       case PORT_C:
-       case PORT_D:
-               intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
-               break;
-       default:
-               DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
-                               port);
-               break;
+               temp = I915_READ(DDI_BUF_CTL(PORT_E));
+               temp &= ~DDI_BUF_CTL_ENABLE;
+               I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
+               POSTING_READ(DDI_BUF_CTL(PORT_E));
+
+               /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
+               temp = I915_READ(DP_TP_CTL(PORT_E));
+               temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+               temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+               I915_WRITE(DP_TP_CTL(PORT_E), temp);
+               POSTING_READ(DP_TP_CTL(PORT_E));
+
+               intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+
+               rx_ctl_val &= ~FDI_RX_ENABLE;
+               I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
+               POSTING_READ(_FDI_RXA_CTL);
+
+               /* Reset FDI_RX_MISC pwrdn lanes */
+               temp = I915_READ(_FDI_RXA_MISC);
+               temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+               temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+               I915_WRITE(_FDI_RXA_MISC, temp);
+               POSTING_READ(_FDI_RXA_MISC);
        }
+
+       DRM_ERROR("FDI link training failed!\n");
 }
 
 /* WRPLL clock dividers */
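
Stripped of register detail, the rewritten hsw_fdi_link_train() above is a retry loop: each voltage/emphasis entry in hsw_ddi_buf_ctl_values[] is tried twice, success shows up as DP_TP_STATUS_AUTOTRAIN_DONE, and every failed attempt tears the DDI buffer and the PCH receiver back down before the next one. Distilled control flow, with the step helpers standing in as placeholders for the register sequences above:

        for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
                start_autotrain_pattern1();                     /* DP_TP_CTL */
                enable_ddi_buf(hsw_ddi_buf_ctl_values[i / 2]);  /* DDI_BUF_CTL */
                enable_fdi_rx_autotrain();                      /* _FDI_RXA_CTL */

                if (I915_READ(DP_TP_STATUS(PORT_E)) & DP_TP_STATUS_AUTOTRAIN_DONE)
                        return; /* switch DP_TP_CTL to normal pixel sending and stop */

                disable_ddi_buf_and_wait_idle();        /* intel_wait_ddi_buf_idle() */
                disable_fdi_rx_and_reset_lanes();
        }
        DRM_ERROR("FDI link training failed!\n");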
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
        {298000,        2,      21,     19},
 };
 
-void intel_ddi_mode_set(struct drm_encoder *encoder,
-                               struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct drm_encoder *encoder,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted_mode)
 {
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-       int port = intel_hdmi->ddi_port;
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+       int port = intel_ddi_get_encoder_port(intel_encoder);
        int pipe = intel_crtc->pipe;
-       int p, n2, r2;
-       u32 temp, i;
+       int type = intel_encoder->type;
 
-       /* On Haswell, we need to enable the clocks and prepare DDI function to
-        * work in HDMI mode for this pipe.
-        */
-       DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
+       DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
+                     port_name(port), pipe_name(pipe));
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+               switch (intel_dp->lane_count) {
+               case 1:
+                       intel_dp->DP |= DDI_PORT_WIDTH_X1;
+                       break;
+               case 2:
+                       intel_dp->DP |= DDI_PORT_WIDTH_X2;
+                       break;
+               case 4:
+                       intel_dp->DP |= DDI_PORT_WIDTH_X4;
+                       break;
+               default:
+                       intel_dp->DP |= DDI_PORT_WIDTH_X4;
+                       WARN(1, "Unexpected DP lane count %d\n",
+                            intel_dp->lane_count);
+                       break;
+               }
+
+               if (intel_dp->has_audio) {
+                       DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
+                                        pipe_name(intel_crtc->pipe));
+
+                       /* write eld */
+                       DRM_DEBUG_DRIVER("DP audio: write eld information\n");
+                       intel_write_eld(encoder, adjusted_mode);
+               }
+
+               intel_dp_init_link_config(intel_dp);
+
+       } else if (type == INTEL_OUTPUT_HDMI) {
+               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+               if (intel_hdmi->has_audio) {
+                       /* Proper support for digital audio needs new logic
+                        * and a new set of registers, so we leave it for
+                        * future patch bombing.
+                        */
+                       DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
+                                        pipe_name(intel_crtc->pipe));
+
+                       /* write eld */
+                       DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
+                       intel_write_eld(encoder, adjusted_mode);
+               }
+
+               intel_hdmi->set_infoframes(encoder, adjusted_mode);
+       }
+}
+
+static struct intel_encoder *
+intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *intel_encoder, *ret = NULL;
+       int num_encoders = 0;
+
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               ret = intel_encoder;
+               num_encoders++;
+       }
+
+       if (num_encoders != 1)
+               WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
+                    intel_crtc->pipe);
+
+       BUG_ON(ret == NULL);
+       return ret;
+}
+
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       uint32_t val;
+
+       switch (intel_crtc->ddi_pll_sel) {
+       case PORT_CLK_SEL_SPLL:
+               plls->spll_refcount--;
+               if (plls->spll_refcount == 0) {
+                       DRM_DEBUG_KMS("Disabling SPLL\n");
+                       val = I915_READ(SPLL_CTL);
+                       WARN_ON(!(val & SPLL_PLL_ENABLE));
+                       I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+                       POSTING_READ(SPLL_CTL);
+               }
+               break;
+       case PORT_CLK_SEL_WRPLL1:
+               plls->wrpll1_refcount--;
+               if (plls->wrpll1_refcount == 0) {
+                       DRM_DEBUG_KMS("Disabling WRPLL 1\n");
+                       val = I915_READ(WRPLL_CTL1);
+                       WARN_ON(!(val & WRPLL_PLL_ENABLE));
+                       I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
+                       POSTING_READ(WRPLL_CTL1);
+               }
+               break;
+       case PORT_CLK_SEL_WRPLL2:
+               plls->wrpll2_refcount--;
+               if (plls->wrpll2_refcount == 0) {
+                       DRM_DEBUG_KMS("Disabling WRPLL 2\n");
+                       val = I915_READ(WRPLL_CTL2);
+                       WARN_ON(!(val & WRPLL_PLL_ENABLE));
+                       I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
+                       POSTING_READ(WRPLL_CTL2);
+               }
+               break;
+       }
+
+       WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
+       WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
+       WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
+
+       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
+}
+
+static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
+{
+       u32 i;
 
        for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
-               if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
+               if (clock <= wrpll_tmds_clock_table[i].clock)
                        break;
 
        if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
                i--;
 
-       p = wrpll_tmds_clock_table[i].p;
-       n2 = wrpll_tmds_clock_table[i].n2;
-       r2 = wrpll_tmds_clock_table[i].r2;
+       *p = wrpll_tmds_clock_table[i].p;
+       *n2 = wrpll_tmds_clock_table[i].n2;
+       *r2 = wrpll_tmds_clock_table[i].r2;
 
-       if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
-               DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
-                        wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
+       if (wrpll_tmds_clock_table[i].clock != clock)
+               DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
+                        wrpll_tmds_clock_table[i].clock, clock);
 
-       DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
-                     crtc->mode.clock, p, n2, r2);
+       DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
+                     clock, *p, *n2, *r2);
+}
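
For reference, the p/n2/r2 triples in wrpll_tmds_clock_table are consistent
with a doubled feedback divider off the 2700 MHz LCPLL reference selected by
WRPLL_PLL_SELECT_LCPLL_2700, divided down by 10 to reach the TMDS rate. That
divider model is inferred from the table values, not stated in this patch; a
minimal self-contained check:

	/* Hedged sketch: reproduce a wrpll_tmds_clock_table entry. The x2
	 * feedback and /10 TMDS factors are assumptions, not from the diff. */
	static unsigned int wrpll_tmds_khz(unsigned int p, unsigned int n2,
					   unsigned int r2)
	{
		const unsigned long lcpll_khz = 2700000;

		return (unsigned int)(lcpll_khz * 2 * n2 / (p * r2) / 10);
	}

	/* wrpll_tmds_khz(2, 21, 19) == 298421 kHz, within ~0.15% of the
	 * 298000 table entry; entries are nearest matches, which is why the
	 * DRM_INFO above reports when the table clock differs from the
	 * requested mode clock. */
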
 
-       /* Enable LCPLL if disabled */
-       temp = I915_READ(LCPLL_CTL);
-       if (temp & LCPLL_PLL_DISABLE)
-               I915_WRITE(LCPLL_CTL,
-                               temp & ~LCPLL_PLL_DISABLE);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+       int type = intel_encoder->type;
+       enum pipe pipe = intel_crtc->pipe;
+       uint32_t reg, val;
 
-       /* Configure WR PLL 1, program the correct divider values for
-        * the desired frequency and wait for warmup */
-       I915_WRITE(WRPLL_CTL1,
-                       WRPLL_PLL_ENABLE |
-                       WRPLL_PLL_SELECT_LCPLL_2700 |
-                       WRPLL_DIVIDER_REFERENCE(r2) |
-                       WRPLL_DIVIDER_FEEDBACK(n2) |
-                       WRPLL_DIVIDER_POST(p));
+       /* TODO: reuse PLLs when possible (compare values) */
 
-       udelay(20);
+       intel_ddi_put_crtc_pll(crtc);
 
-       /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
-        * this port for connection.
-        */
-       I915_WRITE(PORT_CLK_SEL(port),
-                       PORT_CLK_SEL_WRPLL1);
-       I915_WRITE(PIPE_CLK_SEL(pipe),
-                       PIPE_CLK_SEL_PORT(port));
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               switch (intel_dp->link_bw) {
+               case DP_LINK_BW_1_62:
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+                       break;
+               case DP_LINK_BW_2_7:
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+                       break;
+               case DP_LINK_BW_5_4:
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+                       break;
+               default:
+                       DRM_ERROR("Link bandwidth %d unsupported\n",
+                                 intel_dp->link_bw);
+                       return false;
+               }
+
+               /* We don't need to turn any PLL on because we'll use LCPLL. */
+               return true;
+
+       } else if (type == INTEL_OUTPUT_HDMI) {
+               int p, n2, r2;
+
+               if (plls->wrpll1_refcount == 0) {
+                       DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
+                                     pipe_name(pipe));
+                       plls->wrpll1_refcount++;
+                       reg = WRPLL_CTL1;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
+               } else if (plls->wrpll2_refcount == 0) {
+                       DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
+                                     pipe_name(pipe));
+                       plls->wrpll2_refcount++;
+                       reg = WRPLL_CTL2;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
+               } else {
+                       DRM_ERROR("No WRPLLs available!\n");
+                       return false;
+               }
 
+               WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
+                    "WRPLL already enabled\n");
+
+               intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
+
+               val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+                     WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+                     WRPLL_DIVIDER_POST(p);
+
+       } else if (type == INTEL_OUTPUT_ANALOG) {
+               if (plls->spll_refcount == 0) {
+                       DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
+                                     pipe_name(pipe));
+                       plls->spll_refcount++;
+                       reg = SPLL_CTL;
+                       intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+               } else {
+                       /* Without this branch, reg and val were used
+                        * uninitialized when the SPLL was already taken. */
+                       DRM_ERROR("SPLL already in use\n");
+                       return false;
+               }
+
+               WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
+                    "SPLL already enabled\n");
+
+               val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+       } else {
+               WARN(1, "Invalid DDI encoder type %d\n", type);
+               return false;
+       }
+
+       I915_WRITE(reg, val);
        udelay(20);
 
-       if (intel_hdmi->has_audio) {
-               /* Proper support for digital audio needs a new logic and a new set
-                * of registers, so we leave it for future patch bombing.
-                */
-               DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
-                                pipe_name(intel_crtc->pipe));
+       return true;
+}
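
A note on the DP branch above: the three LCPLL taps encode half the DP link
bit rate per lane, so no dedicated PLL is spun up for DP at all. The mapping
is read off the switch statement; the /2 relation is an assumption about the
register naming, not something the diff states:

	/* Sketch: LCPLL tap in MHz for a DP link-bw code (assumed relation:
	 * tap = link bit rate / 2, e.g. 1.62 Gb/s -> 810 MHz). */
	static int lcpll_tap_mhz(int link_bw)
	{
		switch (link_bw) {
		case DP_LINK_BW_1_62: return 810;
		case DP_LINK_BW_2_7:  return 1350;
		case DP_LINK_BW_5_4:  return 2700;
		default:              return -1;
		}
	}
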
 
-               /* write eld */
-               DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
-               intel_write_eld(encoder, adjusted_mode);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       int type = intel_encoder->type;
+       uint32_t temp;
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+
+               temp = TRANS_MSA_SYNC_CLK;
+               switch (intel_crtc->bpp) {
+               case 18:
+                       temp |= TRANS_MSA_6_BPC;
+                       break;
+               case 24:
+                       temp |= TRANS_MSA_8_BPC;
+                       break;
+               case 30:
+                       temp |= TRANS_MSA_10_BPC;
+                       break;
+               case 36:
+                       temp |= TRANS_MSA_12_BPC;
+                       break;
+               default:
+                       temp |= TRANS_MSA_8_BPC;
+                       WARN(1, "%d bpp unsupported by DDI function\n",
+                            intel_crtc->bpp);
+               }
+               I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
        }
+}
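
The bpp cases above are simply 3 x bits-per-channel across the three color
components, so the MSA field selection reduces to (sketch):

	/* 18->6, 24->8, 30->10, 36->12 bpc; anything else is rejected by
	 * the WARN in the switch above. */
	static int msa_bpc_from_bpp(int bpp)
	{
		return bpp / 3;
	}
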
 
-       /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
-       temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
+void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       enum pipe pipe = intel_crtc->pipe;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       int type = intel_encoder->type;
+       uint32_t temp;
+
+       /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
+       temp = TRANS_DDI_FUNC_ENABLE;
+       temp |= TRANS_DDI_SELECT_PORT(port);
 
        switch (intel_crtc->bpp) {
        case 18:
-               temp |= PIPE_DDI_BPC_6;
+               temp |= TRANS_DDI_BPC_6;
                break;
        case 24:
-               temp |= PIPE_DDI_BPC_8;
+               temp |= TRANS_DDI_BPC_8;
                break;
        case 30:
-               temp |= PIPE_DDI_BPC_10;
+               temp |= TRANS_DDI_BPC_10;
                break;
        case 36:
-               temp |= PIPE_DDI_BPC_12;
+               temp |= TRANS_DDI_BPC_12;
                break;
        default:
-               WARN(1, "%d bpp unsupported by pipe DDI function\n",
+               WARN(1, "%d bpp unsupported by transcoder DDI function\n",
                     intel_crtc->bpp);
        }
 
-       if (intel_hdmi->has_hdmi_sink)
-               temp |= PIPE_DDI_MODE_SELECT_HDMI;
+       if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+               temp |= TRANS_DDI_PVSYNC;
+       if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+               temp |= TRANS_DDI_PHSYNC;
+
+       if (cpu_transcoder == TRANSCODER_EDP) {
+               switch (pipe) {
+               case PIPE_A:
+                       temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
+                       break;
+               case PIPE_B:
+                       temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+                       break;
+               case PIPE_C:
+                       temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+                       break;
+               default:
+                       BUG();
+                       break;
+               }
+       }
+
+       if (type == INTEL_OUTPUT_HDMI) {
+               struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+               if (intel_hdmi->has_hdmi_sink)
+                       temp |= TRANS_DDI_MODE_SELECT_HDMI;
+               else
+                       temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+       } else if (type == INTEL_OUTPUT_ANALOG) {
+               temp |= TRANS_DDI_MODE_SELECT_FDI;
+               temp |= (intel_crtc->fdi_lanes - 1) << 1;
+
+       } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+                  type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+               switch (intel_dp->lane_count) {
+               case 1:
+                       temp |= TRANS_DDI_PORT_WIDTH_X1;
+                       break;
+               case 2:
+                       temp |= TRANS_DDI_PORT_WIDTH_X2;
+                       break;
+               case 4:
+                       temp |= TRANS_DDI_PORT_WIDTH_X4;
+                       break;
+               default:
+                       temp |= TRANS_DDI_PORT_WIDTH_X4;
+                       WARN(1, "Unsupported lane count %d\n",
+                            intel_dp->lane_count);
+               }
+
+       } else {
+               WARN(1, "Invalid encoder type %d for pipe %d\n",
+                    intel_encoder->type, pipe);
+       }
+
+       I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
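
The analog/FDI branch above packs the lane count as (lanes - 1) at bit 1 of
TRANS_DDI_FUNC_CTL; the field layout is inferred from the expression itself,
not spelled out in the diff:

	/* Hypothetical helper, equivalent to the expression above. */
	static inline uint32_t fdi_port_width_bits(int lanes)
	{
		return (lanes - 1) << 1;	/* x2 -> 0x2, x4 -> 0x6 */
	}
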
+
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+                                      enum transcoder cpu_transcoder)
+{
+       uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
+       uint32_t val = I915_READ(reg);
+
+       val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+       val |= TRANS_DDI_PORT_NONE;
+       I915_WRITE(reg, val);
+}
+
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
+{
+       struct drm_device *dev = intel_connector->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *intel_encoder = intel_connector->encoder;
+       int type = intel_connector->base.connector_type;
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       enum pipe pipe = 0;
+       enum transcoder cpu_transcoder;
+       uint32_t tmp;
+
+       if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
+               return false;
+
+       if (port == PORT_A)
+               cpu_transcoder = TRANSCODER_EDP;
        else
-               temp |= PIPE_DDI_MODE_SELECT_DVI;
+               cpu_transcoder = pipe;
+
+       tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-               temp |= PIPE_DDI_PVSYNC;
-       if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-               temp |= PIPE_DDI_PHSYNC;
+       switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
+       case TRANS_DDI_MODE_SELECT_HDMI:
+       case TRANS_DDI_MODE_SELECT_DVI:
+               return (type == DRM_MODE_CONNECTOR_HDMIA);
+
+       case TRANS_DDI_MODE_SELECT_DP_SST:
+               if (type == DRM_MODE_CONNECTOR_eDP)
+                       return true;
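+               /* fall through: a non-eDP SST stream still reports as a
+                * DisplayPort connector */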
+       case TRANS_DDI_MODE_SELECT_DP_MST:
+               return (type == DRM_MODE_CONNECTOR_DisplayPort);
 
-       I915_WRITE(DDI_FUNC_CTL(pipe), temp);
+       case TRANS_DDI_MODE_SELECT_FDI:
+               return (type == DRM_MODE_CONNECTOR_VGA);
 
-       intel_hdmi->set_infoframes(encoder, adjusted_mode);
+       default:
+               return false;
+       }
 }
 
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 {
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+       enum port port = intel_ddi_get_encoder_port(encoder);
        u32 tmp;
        int i;
 
-       tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
+       tmp = I915_READ(DDI_BUF_CTL(port));
 
        if (!(tmp & DDI_BUF_CTL_ENABLE))
                return false;
 
-       for_each_pipe(i) {
-               tmp = I915_READ(DDI_FUNC_CTL(i));
+       if (port == PORT_A) {
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
-               if ((tmp & PIPE_DDI_PORT_MASK)
-                   == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
-                       *pipe = i;
-                       return true;
+               switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+               case TRANS_DDI_EDP_INPUT_A_ON:
+               case TRANS_DDI_EDP_INPUT_A_ONOFF:
+                       *pipe = PIPE_A;
+                       break;
+               case TRANS_DDI_EDP_INPUT_B_ONOFF:
+                       *pipe = PIPE_B;
+                       break;
+               case TRANS_DDI_EDP_INPUT_C_ONOFF:
+                       *pipe = PIPE_C;
+                       break;
+               }
+
+               return true;
+       } else {
+               for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+                       tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+                       if ((tmp & TRANS_DDI_PORT_MASK)
+                           == TRANS_DDI_SELECT_PORT(port)) {
+                               *pipe = i;
+                               return true;
+                       }
                }
        }
 
-       DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
+       DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
 
        return true;
 }
 
-void intel_enable_ddi(struct intel_encoder *encoder)
+static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
+                                      enum pipe pipe)
+{
+       uint32_t temp, ret;
+       enum port port;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
+       int i;
+
+       if (cpu_transcoder == TRANSCODER_EDP) {
+               port = PORT_A;
+       } else {
+               temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+               temp &= TRANS_DDI_PORT_MASK;
+
+               for (i = PORT_B; i <= PORT_E; i++)
+                       if (temp == TRANS_DDI_SELECT_PORT(i))
+                               port = i;
+       }
+
+       ret = I915_READ(PORT_CLK_SEL(port));
+
+       DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
+                     pipe_name(pipe), port_name(port), ret);
+
+       return ret;
+}
+
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
 {
-       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       int port = intel_hdmi->ddi_port;
-       u32 temp;
+       enum pipe pipe;
+       struct intel_crtc *intel_crtc;
 
-       temp = I915_READ(DDI_BUF_CTL(port));
-       temp |= DDI_BUF_CTL_ENABLE;
+       for_each_pipe(pipe) {
+               intel_crtc =
+                       to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-       /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
-        * and swing/emphasis values are ignored so nothing special needs
-        * to be done besides enabling the port.
-        */
-       I915_WRITE(DDI_BUF_CTL(port), temp);
+               if (!intel_crtc->active)
+                       continue;
+
+               intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
+                                                                pipe);
+
+               switch (intel_crtc->ddi_pll_sel) {
+               case PORT_CLK_SEL_SPLL:
+                       dev_priv->ddi_plls.spll_refcount++;
+                       break;
+               case PORT_CLK_SEL_WRPLL1:
+                       dev_priv->ddi_plls.wrpll1_refcount++;
+                       break;
+               case PORT_CLK_SEL_WRPLL2:
+                       dev_priv->ddi_plls.wrpll2_refcount++;
+                       break;
+               }
+       }
 }
 
-void intel_disable_ddi(struct intel_encoder *encoder)
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
-       struct drm_device *dev = encoder->base.dev;
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+       if (cpu_transcoder != TRANSCODER_EDP)
+               I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+                          TRANS_CLK_SEL_PORT(port));
+}
+
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+{
+       struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+       if (cpu_transcoder != TRANSCODER_EDP)
+               I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+                          TRANS_CLK_SEL_DISABLED);
+}
+
+static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       int type = intel_encoder->type;
+
+       if (type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+               ironlake_edp_panel_vdd_on(intel_dp);
+               ironlake_edp_panel_on(intel_dp);
+               ironlake_edp_panel_vdd_off(intel_dp, true);
+       }
+
+       WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
+       I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+               intel_dp_start_link_train(intel_dp);
+               intel_dp_complete_link_train(intel_dp);
+       }
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       int type = intel_encoder->type;
+       uint32_t val;
+       bool wait = false;
+
+       val = I915_READ(DDI_BUF_CTL(port));
+       if (val & DDI_BUF_CTL_ENABLE) {
+               val &= ~DDI_BUF_CTL_ENABLE;
+               I915_WRITE(DDI_BUF_CTL(port), val);
+               wait = true;
+       }
+
+       val = I915_READ(DP_TP_CTL(port));
+       val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+       val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+       I915_WRITE(DP_TP_CTL(port), val);
+
+       if (wait)
+               intel_wait_ddi_buf_idle(dev_priv, port);
+
+       if (type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+               ironlake_edp_panel_vdd_on(intel_dp);
+               ironlake_edp_panel_off(intel_dp);
+       }
+
+       I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
+
+static void intel_enable_ddi(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       int port = intel_hdmi->ddi_port;
-       u32 temp;
+       enum port port = intel_ddi_get_encoder_port(intel_encoder);
+       int type = intel_encoder->type;
+
+       if (type == INTEL_OUTPUT_HDMI) {
+               /* In HDMI/DVI mode, the port width and swing/emphasis values
+                * are ignored so nothing special needs to be done besides
+                * enabling the port.
+                */
+               I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
+       } else if (type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               ironlake_edp_backlight_on(intel_dp);
+       }
+}
+
+static void intel_disable_ddi(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       int type = intel_encoder->type;
+
+       if (type == INTEL_OUTPUT_EDP) {
+               struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+               ironlake_edp_backlight_off(intel_dp);
+       }
+}
+
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+       if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+               return 450;
+       else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
+                LCPLL_CLK_FREQ_450)
+               return 450;
+       else if (IS_ULT(dev_priv->dev))
+               return 338;
+       else
+               return 540;
+}
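
The checks above resolve in priority order; summarized (the 337.5 MHz figure
behind the rounded 338 is an assumption about ULT parts, not stated here):

	/* Decision order for intel_ddi_get_cdclk_freq(), highest wins:
	 *   HSW_CDCLK_LIMIT fuse set  -> 450 MHz
	 *   LCPLL strapped to 450     -> 450 MHz
	 *   ULT part                  -> 338 MHz (presumably 337.5 rounded)
	 *   otherwise                 -> 540 MHz
	 */
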
+
+void intel_ddi_pll_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t val = I915_READ(LCPLL_CTL);
+
+       /* The LCPLL register should be turned on by the BIOS. For now let's
+        * just check its state and print errors in case something is wrong.
+        * Don't even try to turn it on.
+        */
+
+       DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
+                     intel_ddi_get_cdclk_freq(dev_priv));
+
+       if (val & LCPLL_CD_SOURCE_FCLK)
+               DRM_ERROR("CDCLK source is not LCPLL\n");
+
+       if (val & LCPLL_PLL_DISABLE)
+               DRM_ERROR("LCPLL is disabled\n");
+}
+
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
+{
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       enum port port = intel_dig_port->port;
+       bool wait = false;      /* only set when the buffer was still enabled */
+       uint32_t val;
+
+       if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
+               val = I915_READ(DDI_BUF_CTL(port));
+               if (val & DDI_BUF_CTL_ENABLE) {
+                       val &= ~DDI_BUF_CTL_ENABLE;
+                       I915_WRITE(DDI_BUF_CTL(port), val);
+                       wait = true;
+               }
+
+               val = I915_READ(DP_TP_CTL(port));
+               val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
+               val |= DP_TP_CTL_LINK_TRAIN_PAT1;
+               I915_WRITE(DP_TP_CTL(port), val);
+               POSTING_READ(DP_TP_CTL(port));
+
+               if (wait)
+                       intel_wait_ddi_buf_idle(dev_priv, port);
+       }
+
+       val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+             DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
+       if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+               val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+       I915_WRITE(DP_TP_CTL(port), val);
+       POSTING_READ(DP_TP_CTL(port));
+
+       intel_dp->DP |= DDI_BUF_CTL_ENABLE;
+       I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
+       POSTING_READ(DDI_BUF_CTL(port));
+
+       udelay(600);
+}
+
+void intel_ddi_fdi_disable(struct drm_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
+       uint32_t val;
+
+       intel_ddi_post_disable(intel_encoder);
+
+       val = I915_READ(_FDI_RXA_CTL);
+       val &= ~FDI_RX_ENABLE;
+       I915_WRITE(_FDI_RXA_CTL, val);
+
+       val = I915_READ(_FDI_RXA_MISC);
+       val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
+       val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
+       I915_WRITE(_FDI_RXA_MISC, val);
+
+       val = I915_READ(_FDI_RXA_CTL);
+       val &= ~FDI_PCDCLK;
+       I915_WRITE(_FDI_RXA_CTL, val);
+
+       val = I915_READ(_FDI_RXA_CTL);
+       val &= ~FDI_RX_PLL_ENABLE;
+       I915_WRITE(_FDI_RXA_CTL, val);
+}
+
+static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       int type = intel_encoder->type;
+
+       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
+               intel_dp_check_link_status(intel_dp);
+}
+
+static void intel_ddi_destroy(struct drm_encoder *encoder)
+{
+       /* HDMI has nothing special to destroy, so we can go with this. */
+       intel_dp_encoder_destroy(encoder);
+}
+
+static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
+                                const struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode)
+{
+       struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+       int type = intel_encoder->type;
+
+       WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
+
+       if (type == INTEL_OUTPUT_HDMI)
+               return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
+       else
+               return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_funcs intel_ddi_funcs = {
+       .destroy = intel_ddi_destroy,
+};
+
+static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
+       .mode_fixup = intel_ddi_mode_fixup,
+       .mode_set = intel_ddi_mode_set,
+       .disable = intel_encoder_noop,
+};
+
+void intel_ddi_init(struct drm_device *dev, enum port port)
+{
+       struct intel_digital_port *intel_dig_port;
+       struct intel_encoder *intel_encoder;
+       struct drm_encoder *encoder;
+       struct intel_connector *hdmi_connector = NULL;
+       struct intel_connector *dp_connector = NULL;
+
+       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       if (!intel_dig_port)
+               return;
+
+       dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!dp_connector) {
+               kfree(intel_dig_port);
+               return;
+       }
+
+       if (port != PORT_A) {
+               hdmi_connector = kzalloc(sizeof(struct intel_connector),
+                                        GFP_KERNEL);
+               if (!hdmi_connector) {
+                       kfree(dp_connector);
+                       kfree(intel_dig_port);
+                       return;
+               }
+       }
+
+       intel_encoder = &intel_dig_port->base;
+       encoder = &intel_encoder->base;
+
+       drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
+
+       intel_encoder->enable = intel_enable_ddi;
+       intel_encoder->pre_enable = intel_ddi_pre_enable;
+       intel_encoder->disable = intel_disable_ddi;
+       intel_encoder->post_disable = intel_ddi_post_disable;
+       intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+
+       intel_dig_port->port = port;
+       if (hdmi_connector)
+               intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
+       else
+               intel_dig_port->hdmi.sdvox_reg = 0;
+       intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
-       temp = I915_READ(DDI_BUF_CTL(port));
-       temp &= ~DDI_BUF_CTL_ENABLE;
+       intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+       intel_encoder->cloneable = false;
+       intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-       I915_WRITE(DDI_BUF_CTL(port), temp);
+       if (hdmi_connector)
+               intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+       intel_dp_init_connector(intel_dig_port, dp_connector);
 }
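
A typical caller would walk the detected ports and let intel_ddi_init() build
the shared digital-port encoder; port_detected() below is a hypothetical
helper standing in for the platform's strap/DDC probing, which this diff does
not show:

	/* Hypothetical bring-up loop (sketch): */
	enum port port;

	for (port = PORT_A; port <= PORT_E; port++)
		if (port_detected(dev_priv, port))	/* hypothetical */
			intel_ddi_init(dev, port);
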
index b426d44..5d127e0 100644
@@ -41,8 +41,6 @@
 #include <drm/drm_crtc_helper.h>
 #include <linux/dma_remapping.h>
 
-#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-
 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
 /* FDI */
 #define IRONLAKE_FDI_FREQ              2700000 /* in kHz for mode->clock */
 
+int
+intel_pch_rawclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       WARN_ON(!HAS_PCH_SPLIT(dev));
+
+       return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+}
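
intel_pch_rawclk() is made non-static because several PCH-side dividers (AUX,
PWM) run off the raw clock rather than CDCLK. A plausible consumer, shown only
as a hedged sketch (the /2 relation is an assumption, not from this diff):

	/* Sketch: derive an AUX clock divider from the PCH raw clock. */
	aux_clock_divider = intel_pch_rawclk(dev) / 2;
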
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                    int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
 
 static const intel_limit_t intel_limits_vlv_hdmi = {
        .dot = { .min = 20000, .max = 165000 },
-       .vco = { .min = 5994000, .max = 4000000 },
+       .vco = { .min = 4000000, .max = 5994000},
        .n = { .min = 1, .max = 7 },
        .m = { .min = 60, .max = 300 }, /* guess */
        .m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
 };
 
 static const intel_limit_t intel_limits_vlv_dp = {
-       .dot = { .min = 162000, .max = 270000 },
-       .vco = { .min = 5994000, .max = 4000000 },
+       .dot = { .min = 25000, .max = 270000 },
+       .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
-       .m = { .min = 60, .max = 300 }, /* guess */
+       .m = { .min = 22, .max = 450 },
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
        .p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
                                limit = &intel_limits_ironlake_single_lvds;
                }
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-                       HAS_eDP)
+                  intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
                limit = &intel_limits_ironlake_display_port;
        else
                limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
        return true;
 }
 
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+                                            enum pipe pipe)
+{
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       return intel_crtc->cpu_transcoder;
+}
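
With no eDP transcoder in play this helper is the identity map, which is what
lets the PIPECONF()/TRANS_DDI_FUNC_CTL() call sites below switch from pipe to
cpu_transcoder without changing pre-Haswell behavior (sketch, assuming the
enum values line up as PIPE_A == TRANSCODER_A and so on):

	/* On a pipe driving a non-eDP output: */
	WARN_ON(intel_pipe_to_cpu_transcoder(dev_priv, PIPE_A) != TRANSCODER_A);
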
+
 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
 
        if (INTEL_INFO(dev)->gen >= 4) {
-               int reg = PIPECONF(pipe);
+               int reg = PIPECONF(cpu_transcoder);
 
                /* Wait for the Pipe State to go off */
                if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
        int reg;
        u32 val;
        bool cur_state;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
 
        if (IS_HASWELL(dev_priv->dev)) {
                /* On Haswell, DDI is used instead of FDI_TX_CTL */
-               reg = DDI_FUNC_CTL(pipe);
+               reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
                val = I915_READ(reg);
-               cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
+               cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
        } else {
                reg = FDI_TX_CTL(pipe);
                val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
        u32 val;
        bool cur_state;
 
-       if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-                       DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
-                       return;
-       } else {
-               reg = FDI_RX_CTL(pipe);
-               val = I915_READ(reg);
-               cur_state = !!(val & FDI_RX_ENABLE);
-       }
+       reg = FDI_RX_CTL(pipe);
+       val = I915_READ(reg);
+       cur_state = !!(val & FDI_RX_ENABLE);
        WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
        int reg;
        u32 val;
 
-       if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-               DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
-               return;
-       }
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
        int reg;
        u32 val;
        bool cur_state;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
 
        /* if we need the pipe A quirk it must be always on */
        if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
                state = true;
 
-       reg = PIPECONF(pipe);
+       reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        cur_state = !!(val & PIPECONF_ENABLE);
        WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 
 /* SBI access */
 static void
-intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
+intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
+               enum intel_sbi_destination destination)
 {
        unsigned long flags;
+       u32 tmp;
 
        spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                goto out_unlock;
        }
 
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_DATA,
-                       value);
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRWR);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+       I915_WRITE(SBI_DATA, value);
+
+       if (destination == SBI_ICLK)
+               tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
+       else
+               tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
+       I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
 }
 
 static u32
-intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
+intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
+              enum intel_sbi_destination destination)
 {
        unsigned long flags;
        u32 value = 0;
 
        spin_lock_irqsave(&dev_priv->dpio_lock, flags);
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                goto out_unlock;
        }
 
-       I915_WRITE(SBI_ADDR,
-                       (reg << 16));
-       I915_WRITE(SBI_CTL_STAT,
-                       SBI_BUSY |
-                       SBI_CTL_OP_CRRD);
+       I915_WRITE(SBI_ADDR, (reg << 16));
+
+       if (destination == SBI_ICLK)
+               value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+       else
+               value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+       I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
        if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
                                100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
 }
 
 /**
- * intel_enable_pch_pll - enable PCH PLL
+ * ironlake_enable_pch_pll - enable PCH PLL
  * @dev_priv: i915 private structure
  * @pipe: pipe PLL to enable
  *
  * The PCH PLL needs to be enabled before the PCH transcoder, since it
  * drives the transcoder clock.
  */
-static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
+static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
 {
        struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
        struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
        pll->on = false;
 }
 
-static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe)
+static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+                                          enum pipe pipe)
 {
-       int reg;
-       u32 val, pipeconf_val;
+       struct drm_device *dev = dev_priv->dev;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       uint32_t reg, val, pipeconf_val;
 
        /* PCH only available on ILK+ */
        BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
        assert_fdi_tx_enabled(dev_priv, pipe);
        assert_fdi_rx_enabled(dev_priv, pipe);
 
-       if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-               DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
-               return;
+       if (HAS_PCH_CPT(dev)) {
+               /* Workaround: Set the timing override bit before enabling the
+                * pch transcoder. */
+               reg = TRANS_CHICKEN2(pipe);
+               val = I915_READ(reg);
+               val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+               I915_WRITE(reg, val);
        }
+
        reg = TRANSCONF(pipe);
        val = I915_READ(reg);
        pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
                DRM_ERROR("failed to enable transcoder %d\n", pipe);
 }
 
-static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe)
+static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
+                                     enum transcoder cpu_transcoder)
 {
-       int reg;
-       u32 val;
+       u32 val, pipeconf_val;
+
+       /* PCH only available on ILK+ */
+       BUG_ON(dev_priv->info->gen < 5);
+
+       /* FDI must be feeding us bits for PCH ports */
+       assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
+       assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
+
+       /* Workaround: set timing override bit. */
+       val = I915_READ(_TRANSA_CHICKEN2);
+       val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
+       I915_WRITE(_TRANSA_CHICKEN2, val);
+
+       val = TRANS_ENABLE;
+       pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+
+       if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
+           PIPECONF_INTERLACED_ILK)
+               val |= TRANS_INTERLACED;
+       else
+               val |= TRANS_PROGRESSIVE;
+
+       I915_WRITE(TRANSCONF(TRANSCODER_A), val);
+       if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
+               DRM_ERROR("Failed to enable PCH transcoder\n");
+}
+
+static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
+                                           enum pipe pipe)
+{
+       struct drm_device *dev = dev_priv->dev;
+       uint32_t reg, val;
 
        /* FDI relies on the transcoder */
        assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
        /* wait for PCH transcoder off, transcoder state */
        if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
                DRM_ERROR("failed to disable transcoder %d\n", pipe);
+
+       if (!HAS_PCH_IBX(dev)) {
+               /* Workaround: Clear the timing override chicken bit again. */
+               reg = TRANS_CHICKEN2(pipe);
+               val = I915_READ(reg);
+               val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+               I915_WRITE(reg, val);
+       }
+}
+
+static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       val = I915_READ(_TRANSACONF);
+       val &= ~TRANS_ENABLE;
+       I915_WRITE(_TRANSACONF, val);
+       /* wait for PCH transcoder off, transcoder state */
+       if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
+               DRM_ERROR("Failed to disable PCH transcoder\n");
+
+       /* Workaround: clear timing override bit. */
+       val = I915_READ(_TRANSA_CHICKEN2);
+       val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
+       I915_WRITE(_TRANSA_CHICKEN2, val);
 }
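
The enable/disable pairs above keep the timing-override workaround balanced,
so a completed modeset cannot leak the chicken bit; summarized from the hunks
in this diff:

	/* TRANS_CHICKEN2_TIMING_OVERRIDE lifecycle:
	 *   ironlake_enable_pch_transcoder:  set on CPT before TRANS_ENABLE
	 *   ironlake_disable_pch_transcoder: cleared again unless PCH is IBX
	 *   lpt_enable_pch_transcoder:       set on _TRANSA_CHICKEN2
	 *   lpt_disable_pch_transcoder:      cleared on _TRANSA_CHICKEN2
	 */
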
 
 /**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
                              bool pch_port)
 {
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
+       enum transcoder pch_transcoder;
        int reg;
        u32 val;
 
+       if (IS_HASWELL(dev_priv->dev))
+               pch_transcoder = TRANSCODER_A;
+       else
+               pch_transcoder = pipe;
+
        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
        else {
                if (pch_port) {
                        /* if driving the PCH, we need FDI enabled */
-                       assert_fdi_rx_pll_enabled(dev_priv, pipe);
-                       assert_fdi_tx_pll_enabled(dev_priv, pipe);
+                       assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+                       assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }
 
-       reg = PIPECONF(pipe);
+       reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if (val & PIPECONF_ENABLE)
                return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
                               enum pipe pipe)
 {
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
        int reg;
        u32 val;
 
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
                return;
 
-       reg = PIPECONF(pipe);
+       reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
        if ((val & PIPECONF_ENABLE) == 0)
                return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
 void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                      enum plane plane)
 {
-       I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
-       I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+       if (dev_priv->info->gen >= 4)
+               I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
+       else
+               I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
 }
 
 /**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 
 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
  * is assumed to be a power-of-two. */
-static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
-                                                       unsigned int bpp,
-                                                       unsigned int pitch)
+unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+                                              unsigned int bpp,
+                                              unsigned int pitch)
 {
        int tile_rows, tiles;
 
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        dspcntr = I915_READ(reg);
        /* Mask out pixel format bits in case we change it */
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-       switch (fb->bits_per_pixel) {
-       case 8:
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
-       case 16:
-               if (fb->depth == 15)
-                       dspcntr |= DISPPLANE_15_16BPP;
-               else
-                       dspcntr |= DISPPLANE_16BPP;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               dspcntr |= DISPPLANE_BGRX555;
                break;
-       case 24:
-       case 32:
-               dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+       case DRM_FORMAT_RGB565:
+               dspcntr |= DISPPLANE_BGRX565;
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               dspcntr |= DISPPLANE_BGRX888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               dspcntr |= DISPPLANE_RGBX888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               dspcntr |= DISPPLANE_BGRX101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+               dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
-               DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+               DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
                return -EINVAL;
        }
+
        if (INTEL_INFO(dev)->gen >= 4) {
                if (obj->tiling_mode != I915_TILING_NONE)
                        dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        if (INTEL_INFO(dev)->gen >= 4) {
                intel_crtc->dspaddr_offset =
-                       gen4_compute_dspaddr_offset_xtiled(&x, &y,
-                                                          fb->bits_per_pixel / 8,
-                                                          fb->pitches[0]);
+                       intel_gen4_compute_offset_xtiled(&x, &y,
+                                                        fb->bits_per_pixel / 8,
+                                                        fb->pitches[0]);
                linear_offset -= intel_crtc->dspaddr_offset;
        } else {
                intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
        dspcntr = I915_READ(reg);
        /* Mask out pixel format bits in case we change it */
        dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-       switch (fb->bits_per_pixel) {
-       case 8:
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_C8:
                dspcntr |= DISPPLANE_8BPP;
                break;
-       case 16:
-               if (fb->depth != 16)
-                       return -EINVAL;
-
-               dspcntr |= DISPPLANE_16BPP;
+       case DRM_FORMAT_RGB565:
+               dspcntr |= DISPPLANE_BGRX565;
                break;
-       case 24:
-       case 32:
-               if (fb->depth == 24)
-                       dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-               else if (fb->depth == 30)
-                       dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
-               else
-                       return -EINVAL;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               dspcntr |= DISPPLANE_BGRX888;
+               break;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               dspcntr |= DISPPLANE_RGBX888;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               dspcntr |= DISPPLANE_BGRX101010;
+               break;
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+               dspcntr |= DISPPLANE_RGBX101010;
                break;
        default:
-               DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+               DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
                return -EINVAL;
        }
 
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 
        linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
        intel_crtc->dspaddr_offset =
-               gen4_compute_dspaddr_offset_xtiled(&x, &y,
-                                                  fb->bits_per_pixel / 8,
-                                                  fb->pitches[0]);
+               intel_gen4_compute_offset_xtiled(&x, &y,
+                                                fb->bits_per_pixel / 8,
+                                                fb->pitches[0]);
        linear_offset -= intel_crtc->dspaddr_offset;
 
        DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_MODIFY_DISPBASE(DSPSURF(plane),
                             obj->gtt_offset + intel_crtc->dspaddr_offset);
-       I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
-       I915_WRITE(DSPLINOFF(plane), linear_offset);
+       if (IS_HASWELL(dev)) {
+               I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+       } else {
+               I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+               I915_WRITE(DSPLINOFF(plane), linear_offset);
+       }
        POSTING_READ(reg);
 
        return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
        return ret;
 }
 
+static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_master_private *master_priv;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       if (!dev->primary->master)
+               return;
+
+       master_priv = dev->primary->master->driver_priv;
+       if (!master_priv->sarea_priv)
+               return;
+
+       switch (intel_crtc->pipe) {
+       case 0:
+               master_priv->sarea_priv->pipeA_x = x;
+               master_priv->sarea_priv->pipeA_y = y;
+               break;
+       case 1:
+               master_priv->sarea_priv->pipeB_x = x;
+               master_priv->sarea_priv->pipeB_y = y;
+               break;
+       default:
+               break;
+       }
+}
+
 static int
 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *fb)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_master_private *master_priv;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_framebuffer *old_fb;
        int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        intel_update_fbc(dev);
        mutex_unlock(&dev->struct_mutex);
 
-       if (!dev->primary->master)
-               return 0;
-
-       master_priv = dev->primary->master->driver_priv;
-       if (!master_priv->sarea_priv)
-               return 0;
-
-       if (intel_crtc->pipe) {
-               master_priv->sarea_priv->pipeB_x = x;
-               master_priv->sarea_priv->pipeB_y = y;
-       } else {
-               master_priv->sarea_priv->pipeA_x = x;
-               master_priv->sarea_priv->pipeA_y = y;
-       }
+       intel_crtc_update_sarea_pos(crtc, x, y);
 
        return 0;
 }
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
                           FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
+       struct intel_crtc *pipe_B_crtc =
+               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+       struct intel_crtc *pipe_C_crtc =
+               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+       uint32_t temp;
 
-       flags |= FDI_PHASE_SYNC_OVR(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-       flags |= FDI_PHASE_SYNC_EN(pipe);
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-       POSTING_READ(SOUTH_CHICKEN1);
+       /* When everything is off, disable fdi C so that we can enable fdi B
+        * with all lanes. XXX: This misses the case where a pipe is not using
+        * any pch resources and so doesn't need any fdi lanes. */
+       if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+               WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+               WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+               temp = I915_READ(SOUTH_CHICKEN1);
+               temp &= ~FDI_BC_BIFURCATION_SELECT;
+               DRM_DEBUG_KMS("disabling fdi C rx\n");
+               I915_WRITE(SOUTH_CHICKEN1, temp);
+       }
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
        udelay(150);
 
        /* Ironlake workaround, enable clock pointer after FDI enable*/
-       if (HAS_PCH_IBX(dev)) {
-               I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-               I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
-                          FDI_RX_PHASE_SYNC_POINTER_EN);
-       }
+       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+       I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+                  FDI_RX_PHASE_SYNC_POINTER_EN);
 
        reg = FDI_RX_IIR(pipe);
        for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+       I915_WRITE(FDI_RX_MISC(pipe),
+                  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
        POSTING_READ(reg);
        udelay(150);
 
-       if (HAS_PCH_CPT(dev))
-               cpt_phase_pointer_enable(dev, pipe);
-
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        POSTING_READ(reg);
        udelay(150);
 
+       DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+                     I915_READ(FDI_RX_IIR(pipe)));
+
        /* enable CPU FDI TX and PCH FDI RX */
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        temp |= FDI_COMPOSITE_SYNC;
        I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+       I915_WRITE(FDI_RX_MISC(pipe),
+                  FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
        reg = FDI_RX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
        POSTING_READ(reg);
        udelay(150);
 
-       if (HAS_PCH_CPT(dev))
-               cpt_phase_pointer_enable(dev, pipe);
-
        for (i = 0; i < 4; i++) {
                reg = FDI_TX_CTL(pipe);
                temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
                if (temp & FDI_RX_BIT_LOCK ||
                    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
                        I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-                       DRM_DEBUG_KMS("FDI train 1 done.\n");
+                       DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
                        break;
                }
        }
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 
                if (temp & FDI_RX_SYMBOL_LOCK) {
                        I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-                       DRM_DEBUG_KMS("FDI train 2 done.\n");
+                       DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
                        break;
                }
        }
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
        int pipe = intel_crtc->pipe;
        u32 reg, temp;
 
-       /* Write the TU size bits so error detection works */
-       I915_WRITE(FDI_RX_TUSIZE1(pipe),
-                  I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
        /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
        reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
        udelay(100);
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-       flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-       flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-       I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-       POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-               I915_WRITE(FDI_RX_CHICKEN(pipe),
-                          I915_READ(FDI_RX_CHICKEN(pipe) &
-                                    ~FDI_RX_PHASE_SYNC_POINTER_EN));
-       } else if (HAS_PCH_CPT(dev)) {
-               cpt_phase_pointer_disable(dev, pipe);
        }
 
        /* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
         * must be driven by its own crtc; no sharing is possible.
         */
        for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
-               /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
-                * CPU handles all others */
-               if (IS_HASWELL(dev)) {
-                       /* It is still unclear how this will work on PPT, so throw up a warning */
-                       WARN_ON(!HAS_PCH_LPT(dev));
-
-                       if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
-                               DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
-                               return true;
-                       } else {
-                               DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-                                             intel_encoder->type);
-                               return false;
-                       }
-               }
-
                switch (intel_encoder->type) {
                case INTEL_OUTPUT_EDP:
                        if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
        return true;
 }
 
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+       return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
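
The Haswell variant can be this short because on Haswell only the analog/VGA output is routed through the PCH (over FDI to the LPT transcoder); every other output is driven directly by the CPU's DDI ports. Checking the pipe for an analog encoder is therefore equivalent to the per-encoder walk that ironlake_crtc_driving_pch() still has to do.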
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 
        /* Disable SSCCTL */
        intel_sbi_write(dev_priv, SBI_SSCCTL6,
-                               intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-                                       SBI_SSCCTL_DISABLE);
+                       intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+                               SBI_SSCCTL_DISABLE,
+                       SBI_ICLK);
 
        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
        if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
                        phaseinc);
 
        /* Program SSCDIVINTPHASE6 */
-       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
        temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
        temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
        temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
        temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
        temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
        temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
-       intel_sbi_write(dev_priv,
-                       SBI_SSCDIVINTPHASE6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
        /* Program SSCAUXDIV */
-       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
        temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
        temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-       intel_sbi_write(dev_priv,
-                       SBI_SSCAUXDIV6,
-                       temp);
-
+       intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
        /* Enable modulator and associated divider */
-       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+       temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
        temp &= ~SBI_SSCCTL_DISABLE;
-       intel_sbi_write(dev_priv,
-                       SBI_SSCCTL6,
-                       temp);
+       intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
        /* Wait for initialization time */
        udelay(24);
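
All of the SBI accesses above share one read-modify-write shape, now carrying an explicit destination port (SBI_ICLK here; SBI_MPHY shows up later in lpt_init_pch_refclk for the FDI mPHY registers). A hedged sketch of that pattern as a helper -- the wrapper itself is hypothetical and not part of the patch, and it assumes the destination enum introduced along with the new parameter, while intel_sbi_read/intel_sbi_write are the accessors the patch actually uses:

	/* Hypothetical convenience wrapper around the real accessors. */
	static void intel_sbi_rmw(struct drm_i915_private *dev_priv, u16 reg,
				  u32 clear, u32 set,
				  enum intel_sbi_destination dest)
	{
		u32 tmp = intel_sbi_read(dev_priv, reg, dest);

		tmp &= ~clear;
		tmp |= set;
		intel_sbi_write(dev_priv, reg, tmp, dest);
	}

With it, the SSCAUXDIV update above would read intel_sbi_rmw(dev_priv, SBI_SSCAUXDIV6, SBI_SSCAUXDIV_FINALDIV2SEL(1), SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv), SBI_ICLK).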
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
        assert_transcoder_disabled(dev_priv, pipe);
 
+       /* Write the TU size bits before fdi link training, so that error
+        * detection works. */
+       I915_WRITE(FDI_RX_TUSIZE1(pipe),
+                  I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc);
 
-       intel_enable_pch_pll(intel_crtc);
+       /* XXX: pch pll's can be enabled any time before we enable the PCH
+        * transcoder, and we actually should do this to not upset any PCH
+        * transcoder that already use the clock when we share it.
+        *
+        * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+        * unconditionally resets the pll - we need that to have the right LVDS
+        * enable sequence. */
+       ironlake_enable_pch_pll(intel_crtc);
 
-       if (HAS_PCH_LPT(dev)) {
-               DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
-               lpt_program_iclkip(crtc);
-       } else if (HAS_PCH_CPT(dev)) {
+       if (HAS_PCH_CPT(dev)) {
                u32 sel;
 
                temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
        I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
 
-       if (!IS_HASWELL(dev))
-               intel_fdi_normal_train(crtc);
+       intel_fdi_normal_train(crtc);
 
        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
                        temp |= TRANS_DP_PORT_SEL_D;
                        break;
                default:
-                       DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
-                       temp |= TRANS_DP_PORT_SEL_B;
-                       break;
+                       BUG();
                }
 
                I915_WRITE(reg, temp);
        }
 
-       intel_enable_transcoder(dev_priv, pipe);
+       ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+       assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+       lpt_program_iclkip(crtc);
+
+       /* Set transcoder timing. */
+       I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+       I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+       I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
+
+       I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+       I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+       I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
+       I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+       lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+       int dslreg = PIPEDSL(pipe);
        u32 temp;
 
        temp = I915_READ(dslreg);
        udelay(500);
        if (wait_for(I915_READ(dslreg) != temp, 5)) {
-               /* Without this, mode sets may fail silently on FDI */
-               I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
-               udelay(250);
-               I915_WRITE(tc2reg, 0);
                if (wait_for(I915_READ(dslreg) != temp, 5))
                        DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
        }
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                        I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
        }
 
-       is_pch_port = intel_crtc_driving_pch(crtc);
+       is_pch_port = ironlake_crtc_driving_pch(crtc);
 
        if (is_pch_port) {
+               /* Note: FDI PLL enabling _must_ be done before we enable the
+                * cpu pipes, hence this is separate from all the other fdi/pch
+                * enabling. */
                ironlake_fdi_pll_enable(intel_crtc);
        } else {
                assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        /* Enable panel fitting for LVDS */
        if (dev_priv->pch_pf_size &&
-           (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+           (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+            intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
                /* Force use of hard-coded filter coefficients
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
-               I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+               if (IS_IVYBRIDGE(dev))
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                                PF_PIPE_SEL_IVB(pipe));
+               else
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
        }
@@ -3265,7 +3399,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
-static void ironlake_crtc_disable(struct drm_crtc *crtc)
+static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3273,52 +3407,129 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       u32 reg, temp;
+       bool is_pch_port;
 
+       WARN_ON(!crtc->enabled);
 
-       if (!intel_crtc->active)
+       if (intel_crtc->active)
                return;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       intel_crtc->active = true;
+       intel_update_watermarks(dev);
 
-       intel_crtc_wait_for_pending_flips(crtc);
-       drm_vblank_off(dev, pipe);
-       intel_crtc_update_cursor(crtc, false);
+       is_pch_port = haswell_crtc_driving_pch(crtc);
 
-       intel_disable_plane(dev_priv, plane, pipe);
+       if (is_pch_port)
+               dev_priv->display.fdi_link_train(crtc);
 
-       if (dev_priv->cfb_plane == plane)
-               intel_disable_fbc(dev);
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder);
 
-       intel_disable_pipe(dev_priv, pipe);
+       intel_ddi_enable_pipe_clock(intel_crtc);
 
-       /* Disable PF */
-       I915_WRITE(PF_CTL(pipe), 0);
-       I915_WRITE(PF_WIN_SZ(pipe), 0);
+       /* Enable panel fitting for eDP */
+       if (dev_priv->pch_pf_size &&
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+               /* Force use of hard-coded filter coefficients
+                * as some pre-programmed values are broken,
+                * e.g. x201.
+                */
+               I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                        PF_PIPE_SEL_IVB(pipe));
+               I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+               I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+       }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->post_disable)
-                       encoder->post_disable(encoder);
+       /*
+        * On ILK+ LUT must be loaded before the pipe is running but with
+        * clocks enabled
+        */
+       intel_crtc_load_lut(crtc);
 
-       ironlake_fdi_disable(crtc);
+       intel_ddi_set_pipe_settings(crtc);
+       intel_ddi_enable_pipe_func(crtc);
 
-       intel_disable_transcoder(dev_priv, pipe);
+       intel_enable_pipe(dev_priv, pipe, is_pch_port);
+       intel_enable_plane(dev_priv, plane, pipe);
 
-       if (HAS_PCH_CPT(dev)) {
-               /* disable TRANS_DP_CTL */
-               reg = TRANS_DP_CTL(pipe);
-               temp = I915_READ(reg);
-               temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
-               temp |= TRANS_DP_PORT_SEL_NONE;
-               I915_WRITE(reg, temp);
+       if (is_pch_port)
+               lpt_pch_enable(crtc);
 
-               /* disable DPLL_SEL */
-               temp = I915_READ(PCH_DPLL_SEL);
-               switch (pipe) {
-               case 0:
-                       temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
-                       break;
+       mutex_lock(&dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       intel_crtc_update_cursor(crtc, true);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->enable(encoder);
+
+       /*
+        * There seems to be a race in PCH platform hw (at least on some
+        * outputs) where an enabled pipe still completes any pageflip right
+        * away (as if the pipe is off) instead of waiting for vblank. As soon
+        * as the first vblank happened, everything works as expected. Hence just
+        * wait for one vblank before returning to avoid strange things
+        * happening.
+        */
+       intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
+static void ironlake_crtc_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *encoder;
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       u32 reg, temp;
+
+
+       if (!intel_crtc->active)
+               return;
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->disable(encoder);
+
+       intel_crtc_wait_for_pending_flips(crtc);
+       drm_vblank_off(dev, pipe);
+       intel_crtc_update_cursor(crtc, false);
+
+       intel_disable_plane(dev_priv, plane, pipe);
+
+       if (dev_priv->cfb_plane == plane)
+               intel_disable_fbc(dev);
+
+       intel_disable_pipe(dev_priv, pipe);
+
+       /* Disable PF */
+       I915_WRITE(PF_CTL(pipe), 0);
+       I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->post_disable)
+                       encoder->post_disable(encoder);
+
+       ironlake_fdi_disable(crtc);
+
+       ironlake_disable_pch_transcoder(dev_priv, pipe);
+
+       if (HAS_PCH_CPT(dev)) {
+               /* disable TRANS_DP_CTL */
+               reg = TRANS_DP_CTL(pipe);
+               temp = I915_READ(reg);
+               temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+               temp |= TRANS_DP_PORT_SEL_NONE;
+               I915_WRITE(reg, temp);
+
+               /* disable DPLL_SEL */
+               temp = I915_READ(PCH_DPLL_SEL);
+               switch (pipe) {
+               case 0:
+                       temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
+                       break;
                case 1:
                        temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
                        break;
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        mutex_unlock(&dev->struct_mutex);
 }
 
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *encoder;
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       bool is_pch_port;
+
+       if (!intel_crtc->active)
+               return;
+
+       is_pch_port = haswell_crtc_driving_pch(crtc);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               encoder->disable(encoder);
+
+       intel_crtc_wait_for_pending_flips(crtc);
+       drm_vblank_off(dev, pipe);
+       intel_crtc_update_cursor(crtc, false);
+
+       intel_disable_plane(dev_priv, plane, pipe);
+
+       if (dev_priv->cfb_plane == plane)
+               intel_disable_fbc(dev);
+
+       intel_disable_pipe(dev_priv, pipe);
+
+       intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+       /* Disable PF */
+       I915_WRITE(PF_CTL(pipe), 0);
+       I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+       intel_ddi_disable_pipe_clock(intel_crtc);
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->post_disable)
+                       encoder->post_disable(encoder);
+
+       if (is_pch_port) {
+               lpt_disable_pch_transcoder(dev_priv);
+               intel_ddi_fdi_disable(crtc);
+       }
+
+       intel_crtc->active = false;
+       intel_update_watermarks(dev);
+
+       mutex_lock(&dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
+}
+
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        intel_put_pch_pll(intel_crtc);
 }
 
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+        * start using it. */
+       intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+       intel_ddi_put_crtc_pll(crtc);
+}
+
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
        if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode,
                           intel_clock_t *clock, intel_clock_t *reduced_clock,
-                          int refclk, int num_connectors)
+                          int num_connectors)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
        int pipe = intel_crtc->pipe;
        u32 dpll, mdiv, pdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
-       bool is_hdmi;
+       bool is_sdvo;
+       u32 temp;
+
+       is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+               intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+       dpll = DPLL_VGA_MODE_DIS;
+       dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+       dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+       dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 
-       is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+       I915_WRITE(DPLL(pipe), dpll);
+       POSTING_READ(DPLL(pipe));
 
        bestn = clock->n;
        bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
        bestp1 = clock->p1;
        bestp2 = clock->p2;
 
-       /* Enable DPIO clock input */
-       dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-               DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-       I915_WRITE(DPLL(pipe), dpll);
-       POSTING_READ(DPLL(pipe));
-
+       /*
+        * In Valleyview the PLL and the lane counter registers are
+        * programmed through the DPIO interface.
+        */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
        mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
        mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
        intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
 
-       pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+       pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
                (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
-               (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+               (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+               (5 << DPIO_CLK_BIAS_CTL_SHIFT);
        intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
 
-       intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+       intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
 
        dpll |= DPLL_VCO_ENABLE;
        I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
        if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-       if (is_hdmi) {
-               u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+       intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
+
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+       I915_WRITE(DPLL(pipe), dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(DPLL(pipe));
+       udelay(150);
 
+       temp = 0;
+       if (is_sdvo) {
+               temp = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (temp > 1)
                        temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
                else
                        temp = 0;
-
-               I915_WRITE(DPLL_MD(pipe), temp);
-               POSTING_READ(DPLL_MD(pipe));
        }
+       I915_WRITE(DPLL_MD(pipe), temp);
+       POSTING_READ(DPLL_MD(pipe));
 
-       intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+       /* Now program lane control registers */
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+           intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+               temp = 0x1000C4;
+               if (pipe == 1)
+                       temp |= (1 << 21);
+               intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+       }
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+               temp = 0x1000C4;
+               if (pipe == 1)
+                       temp |= (1 << 21);
+               intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+       }
 }
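
The DPLL_MD write above stores the SDVO pixel multiplier biased by one; the multiplier exists so the DPLL can stay inside its VCO range at low dot clocks while the SDVO link runs at a multiple of the pixel clock. A quick sketch of the encoding (the multiplier value is illustrative, not from this patch):

	/* A mode doubled on the SDVO link: multiplier = 2, so the field
	 * holds (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT. */
	u32 dpll_md = (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;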
 
 static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
        u32 dpll;
        bool is_sdvo;
 
+       i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
        is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
                intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 
 static void i8xx_update_pll(struct drm_crtc *crtc,
                            struct drm_display_mode *adjusted_mode,
-                           intel_clock_t *clock,
+                           intel_clock_t *clock, intel_clock_t *reduced_clock,
                            int num_connectors)
 {
        struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
        int pipe = intel_crtc->pipe;
        u32 dpll;
 
+       i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
        dpll = DPLL_VGA_MODE_DIS;
 
        if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
        I915_WRITE(DPLL(pipe), dpll);
 }
 
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = intel_crtc->pipe;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       uint32_t vsyncshift;
+
+       if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               /* the chip adds 2 halflines automatically */
+               adjusted_mode->crtc_vtotal -= 1;
+               adjusted_mode->crtc_vblank_end -= 1;
+               vsyncshift = adjusted_mode->crtc_hsync_start
+                            - adjusted_mode->crtc_htotal / 2;
+       } else {
+               vsyncshift = 0;
+       }
+
+       if (INTEL_INFO(dev)->gen > 3)
+               I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+       I915_WRITE(HTOTAL(cpu_transcoder),
+                  (adjusted_mode->crtc_hdisplay - 1) |
+                  ((adjusted_mode->crtc_htotal - 1) << 16));
+       I915_WRITE(HBLANK(cpu_transcoder),
+                  (adjusted_mode->crtc_hblank_start - 1) |
+                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+       I915_WRITE(HSYNC(cpu_transcoder),
+                  (adjusted_mode->crtc_hsync_start - 1) |
+                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+       I915_WRITE(VTOTAL(cpu_transcoder),
+                  (adjusted_mode->crtc_vdisplay - 1) |
+                  ((adjusted_mode->crtc_vtotal - 1) << 16));
+       I915_WRITE(VBLANK(cpu_transcoder),
+                  (adjusted_mode->crtc_vblank_start - 1) |
+                  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+       I915_WRITE(VSYNC(cpu_transcoder),
+                  (adjusted_mode->crtc_vsync_start - 1) |
+                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+       /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+        * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+        * documented in the DDI_FUNC_CTL register description, EDP Input Select
+        * bits. */
+       if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+           (pipe == PIPE_B || pipe == PIPE_C))
+               I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+       /* pipesrc controls the size that is scaled from, which should
+        * always be the user's requested size.
+        */
+       I915_WRITE(PIPESRC(pipe),
+                  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
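
All of the timing registers written above share one encoding: the zero-based start/active value sits in the low 16 bits and the zero-based end/total value in the high 16 bits. A worked example for the horizontal total of a standard 1080p mode (the numbers are illustrative, not from this patch):

	/* hdisplay = 1920, htotal = 2200:
	 * HTOTAL = (1920 - 1) | ((2200 - 1) << 16)
	 *        = 0x077F | (0x0897 << 16) = 0x0897077F */
	u32 htotal_reg = (1920 - 1) | ((2200 - 1) << 16);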
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
-       u32 dspcntr, pipeconf, vsyncshift;
+       u32 dspcntr, pipeconf;
        bool ok, has_reduced_clock = false, is_sdvo = false;
        bool is_lvds = false, is_tv = false, is_dp = false;
        struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        if (is_sdvo && is_tv)
                i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 
-       i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
-                                &reduced_clock : NULL);
-
        if (IS_GEN2(dev))
-               i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+               i8xx_update_pll(crtc, adjusted_mode, &clock,
+                               has_reduced_clock ? &reduced_clock : NULL,
+                               num_connectors);
        else if (IS_VALLEYVIEW(dev))
-               vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
-                              refclk, num_connectors);
+               vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+                               has_reduced_clock ? &reduced_clock : NULL,
+                               num_connectors);
        else
                i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
                                has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
+       if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+               if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+                       pipeconf |= PIPECONF_BPP_6 |
+                                       PIPECONF_ENABLE |
+                                       I965_PIPECONF_ACTIVE;
+               }
+       }
+
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
        drm_mode_debug_printmodeline(mode);
 
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
        pipeconf &= ~PIPECONF_INTERLACE_MASK;
        if (!IS_GEN2(dev) &&
-           adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+           adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
-               /* the chip adds 2 halflines automatically */
-               adjusted_mode->crtc_vtotal -= 1;
-               adjusted_mode->crtc_vblank_end -= 1;
-               vsyncshift = adjusted_mode->crtc_hsync_start
-                            - adjusted_mode->crtc_htotal/2;
-       } else {
+       else
                pipeconf |= PIPECONF_PROGRESSIVE;
-               vsyncshift = 0;
-       }
 
-       if (!IS_GEN3(dev))
-               I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
-       I915_WRITE(HTOTAL(pipe),
-                  (adjusted_mode->crtc_hdisplay - 1) |
-                  ((adjusted_mode->crtc_htotal - 1) << 16));
-       I915_WRITE(HBLANK(pipe),
-                  (adjusted_mode->crtc_hblank_start - 1) |
-                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
-       I915_WRITE(HSYNC(pipe),
-                  (adjusted_mode->crtc_hsync_start - 1) |
-                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-       I915_WRITE(VTOTAL(pipe),
-                  (adjusted_mode->crtc_vdisplay - 1) |
-                  ((adjusted_mode->crtc_vtotal - 1) << 16));
-       I915_WRITE(VBLANK(pipe),
-                  (adjusted_mode->crtc_vblank_start - 1) |
-                  ((adjusted_mode->crtc_vblank_end - 1) << 16));
-       I915_WRITE(VSYNC(pipe),
-                  (adjusted_mode->crtc_vsync_start - 1) |
-                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
        /* pipesrc and dspsize control the size that is scaled from,
         * which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                   ((mode->vdisplay - 1) << 16) |
                   (mode->hdisplay - 1));
        I915_WRITE(DSPPOS(plane), 0);
-       I915_WRITE(PIPESRC(pipe),
-                  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
        I915_WRITE(PIPECONF(pipe), pipeconf);
        POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        return ret;
 }
 
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
        }
 }
 
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+       bool has_vga = false;
+       bool is_sdv = false;
+       u32 tmp;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_vga = true;
+                       break;
+               }
+       }
+
+       if (!has_vga)
+               return;
+
+       /* XXX: Rip out SDV support once Haswell ships for real. */
+       if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+               is_sdv = true;
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       udelay(24);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       if (!is_sdv) {
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+               if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+                                      FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+                       DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+               tmp = I915_READ(SOUTH_CHICKEN2);
+               tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+               I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+               if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+                                       FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+                                      100))
+                       DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+       tmp &= ~(0xFF << 24);
+       tmp |= (0x12 << 24);
+       intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+               tmp &= ~(0x3 << 6);
+               tmp |= (1 << 6) | (1 << 0);
+               intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+       }
+
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+               tmp |= 0x7FFF;
+               intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+       tmp |= (1 << 11);
+       intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+       if (is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+               intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+               tmp |= (0x3F << 8);
+               intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+       tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+       intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+               tmp &= ~(7 << 13);
+               tmp |= (5 << 13);
+               intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+       }
+
+       tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+       tmp &= ~0xFF;
+       tmp |= 0x1C;
+       intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+       tmp &= ~(0xFF << 16);
+       tmp |= (0x1C << 16);
+       intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+       if (!is_sdv) {
+               tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+               tmp |= (1 << 27);
+               intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+               tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+               tmp &= ~(0xF << 28);
+               tmp |= (4 << 28);
+               intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+       }
+
+       /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+       tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+       tmp |= SBI_DBUFF0_ENABLE;
+       intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               ironlake_init_pch_refclk(dev);
+       else if (HAS_PCH_LPT(dev))
+               lpt_init_pch_refclk(dev);
+}
+
 static int ironlake_get_refclk(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
                val |= PIPE_12BPC;
                break;
        default:
-               val |= PIPE_8BPC;
-               break;
+               /* Case prevented by intel_choose_pipe_bpp_dither. */
+               BUG();
        }
 
        val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
        POSTING_READ(PIPECONF(pipe));
 }
 
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+                                struct drm_display_mode *adjusted_mode,
+                                bool dither)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       uint32_t val;
+
+       val = I915_READ(PIPECONF(cpu_transcoder));
+
+       val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+       if (dither)
+               val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+       val &= ~PIPECONF_INTERLACE_MASK_HSW;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+               val |= PIPECONF_INTERLACED_ILK;
+       else
+               val |= PIPECONF_PROGRESSIVE;
+
+       I915_WRITE(PIPECONF(cpu_transcoder), val);
+       POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
                                    struct drm_display_mode *adjusted_mode,
                                    intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
        return true;
 }
 
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-                                 struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode,
-                                 int x, int y,
-                                 struct drm_framebuffer *fb)
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t temp;
+
+       temp = I915_READ(SOUTH_CHICKEN1);
+       if (temp & FDI_BC_BIFURCATION_SELECT)
+               return;
+
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+       WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+       temp |= FDI_BC_BIFURCATION_SELECT;
+       DRM_DEBUG_KMS("enabling fdi C rx\n");
+       I915_WRITE(SOUTH_CHICKEN1, temp);
+       POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *pipe_B_crtc =
+               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+       DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+                     intel_crtc->pipe, intel_crtc->fdi_lanes);
+       if (intel_crtc->fdi_lanes > 4) {
+               DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+                             intel_crtc->pipe, intel_crtc->fdi_lanes);
+               /* Clamp lanes to avoid programming the hw with bogus values. */
+               intel_crtc->fdi_lanes = 4;
+
+               return false;
+       }
+
+       if (dev_priv->num_pipe == 2)
+               return true;
+
+       switch (intel_crtc->pipe) {
+       case PIPE_A:
+               return true;
+       case PIPE_B:
+               if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+                   intel_crtc->fdi_lanes > 2) {
+                       DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+                                     intel_crtc->pipe, intel_crtc->fdi_lanes);
+                       /* Clamp lanes to avoid programming the hw with bogus values. */
+                       intel_crtc->fdi_lanes = 2;
+
+                       return false;
+               }
+
+               if (intel_crtc->fdi_lanes > 2)
+                       WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+               else
+                       cpt_enable_fdi_bc_bifurcation(dev);
+
+               return true;
+       case PIPE_C:
+               if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+                       if (intel_crtc->fdi_lanes > 2) {
+                               DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+                                             intel_crtc->pipe, intel_crtc->fdi_lanes);
+                               /* Clamp lanes to avoid programming the hw with bogus values. */
+                               intel_crtc->fdi_lanes = 2;
+
+                               return false;
+                       }
+               } else {
+                       DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+                       return false;
+               }
+
+               cpt_enable_fdi_bc_bifurcation(dev);
+
+               return true;
+       default:
+               BUG();
+       }
+}
+
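
The checks above encode a simple budget: pipe A has dedicated FDI lanes, while pipes B and C split the four FDI B lanes -- pipe B may use more than two only while pipe C is unused, and any use by pipe C requires the 2+2 split selected by the bifurcation bit. A minimal sketch of just that validity rule, assuming only the two lane counts matter (illustrative, not part of the patch):

	/* Shared-lane validity rule for pipes B and C, distilled from
	 * ironlake_check_fdi_lanes() above. */
	static bool fdi_bc_lanes_ok(enum pipe pipe, int lanes,
				    int other_pipe_lanes)
	{
		if (pipe == PIPE_B && other_pipe_lanes == 0)
			return lanes <= 4;	/* pipe B may take all four */
		return lanes <= 2 && other_pipe_lanes <= 2;	/* 2 + 2 split */
	}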
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+       /*
+        * Account for spread spectrum to avoid
+        * oversubscribing the link. Max center spread
+        * is 2.5%; use 5% for safety's sake.
+        */
+       u32 bps = target_clock * bpp * 21 / 20;
+       return bps / (link_bw * 8) + 1;
+}
+
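
ironlake_get_lanes_required() sizes the FDI link with the 5% spread-spectrum margin described in its comment and rounds up by adding one lane to the integer quotient. A worked example -- the mode and the kHz units are illustrative assumptions matching how the surrounding code passes clocks: 1080p at 148500 kHz and 24 bpp over a 2.7 GHz FDI link (link_bw = 270000, i.e. 2.16 Gbit/s of payload per lane after 8b/10b coding):

	/* bps   = 148500 * 24 * 21 / 20       = 3742200
	 * lanes = 3742200 / (270000 * 8) + 1  = 1 + 1 = 2 */
	int lanes = ironlake_get_lanes_required(148500, 270000, 24);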
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       int plane = intel_crtc->plane;
-       int num_connectors = 0;
-       intel_clock_t clock, reduced_clock;
-       u32 dpll, fp = 0, fp2 = 0;
-       bool ok, has_reduced_clock = false, is_sdvo = false;
-       bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-       struct intel_encoder *encoder, *edp_encoder = NULL;
-       int ret;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       struct intel_encoder *intel_encoder, *edp_encoder = NULL;
        struct fdi_m_n m_n = {0};
-       u32 temp;
-       int target_clock, pixel_multiplier, lane, link_bw, factor;
-       unsigned int pipe_bpp;
-       bool dither;
-       bool is_cpu_edp = false, is_pch_edp = false;
+       int target_clock, pixel_multiplier, lane, link_bw;
+       bool is_dp = false, is_cpu_edp = false;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
-               case INTEL_OUTPUT_SDVO:
-               case INTEL_OUTPUT_HDMI:
-                       is_sdvo = true;
-                       if (encoder->needs_tv_clock)
-                               is_tv = true;
-                       break;
-               case INTEL_OUTPUT_TVOUT:
-                       is_tv = true;
-                       break;
-               case INTEL_OUTPUT_ANALOG:
-                       is_crt = true;
-                       break;
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               switch (intel_encoder->type) {
                case INTEL_OUTPUT_DISPLAYPORT:
                        is_dp = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        is_dp = true;
-                       if (intel_encoder_is_pch_edp(&encoder->base))
-                               is_pch_edp = true;
-                       else
+                       if (!intel_encoder_is_pch_edp(&intel_encoder->base))
                                is_cpu_edp = true;
-                       edp_encoder = encoder;
+                       edp_encoder = intel_encoder;
                        break;
                }
-
-               num_connectors++;
-       }
-
-       ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
-                                    &has_reduced_clock, &reduced_clock);
-       if (!ok) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
        }
 
-       /* Ensure that the cursor is valid for the new mode before changing... */
-       intel_crtc_update_cursor(crtc, true);
-
        /* FDI link */
        pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
        lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        else
                target_clock = adjusted_mode->clock;
 
-       /* determine panel color depth */
-       dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
-                                             adjusted_mode);
-       if (is_lvds && dev_priv->lvds_dither)
-               dither = true;
-
-       if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
-           pipe_bpp != 36) {
-               WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
-                    pipe_bpp);
-               pipe_bpp = 24;
-       }
-       intel_crtc->bpp = pipe_bpp;
-
-       if (!lane) {
-               /*
-                * Account for spread spectrum to avoid
-                * oversubscribing the link. Max center spread
-                * is 2.5%; use 5% for safety's sake.
-                */
-               u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-               lane = bps / (link_bw * 8) + 1;
-       }
+       if (!lane)
+               lane = ironlake_get_lanes_required(target_clock, link_bw,
+                                                  intel_crtc->bpp);
 
        intel_crtc->fdi_lanes = lane;
 
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
                             &m_n);
 
-       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-       if (has_reduced_clock)
-               fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-                       reduced_clock.m2;
+       I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+       I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+       I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+       I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
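The four register writes above pace the pipe against the link: the DATA M/N pair encodes the payload-to-capacity ratio, the LINK M/N pair the pixel-to-link clock ratio. Roughly, as a sketch of what ironlake_compute_m_n() hands back rather than its exact body:

    /* Sketch only: reduced fractions, with tu defaulting to 64. */
    gmch_m : gmch_n  ~  (bpp * target_clock) : (8 * link_bw * lane)
    link_m : link_n  ~  target_clock : link_bw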
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+                                     struct drm_display_mode *adjusted_mode,
+                                     intel_clock_t *clock, u32 fp)
+{
+       struct drm_crtc *crtc = &intel_crtc->base;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_encoder *intel_encoder;
+       uint32_t dpll;
+       int factor, pixel_multiplier, num_connectors = 0;
+       bool is_lvds = false, is_sdvo = false, is_tv = false;
+       bool is_dp = false, is_cpu_edp = false;
+
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               switch (intel_encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_SDVO:
+               case INTEL_OUTPUT_HDMI:
+                       is_sdvo = true;
+                       if (intel_encoder->needs_tv_clock)
+                               is_tv = true;
+                       break;
+               case INTEL_OUTPUT_TVOUT:
+                       is_tv = true;
+                       break;
+               case INTEL_OUTPUT_DISPLAYPORT:
+                       is_dp = true;
+                       break;
+               case INTEL_OUTPUT_EDP:
+                       is_dp = true;
+                       if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+                               is_cpu_edp = true;
+                       break;
+               }
+
+               num_connectors++;
+       }
 
        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        } else if (is_sdvo && is_tv)
                factor = 20;
 
-       if (clock.m < factor * clock.n)
+       if (clock->m < factor * clock->n)
                fp |= FP_CB_TUNE;
 
        dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
        if (is_sdvo) {
-               int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+               pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
                if (pixel_multiplier > 1) {
                        dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
                }
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                dpll |= DPLL_DVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
-       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+       dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        /* also FPA1 */
-       dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+       dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-       switch (clock.p2) {
+       switch (clock->p2) {
        case 5:
                dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
                break;
@@ -4928,26 +5550,90 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                break;
        }
 
-       if (is_sdvo && is_tv)
-               dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (is_tv)
-               /* XXX: just matching BIOS for now */
-               /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
-               dpll |= 3;
-       else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
-               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
-       else
-               dpll |= PLL_REF_INPUT_DREFCLK;
+       if (is_sdvo && is_tv)
+               dpll |= PLL_REF_INPUT_TVCLKINBC;
+       else if (is_tv)
+               /* XXX: just matching BIOS for now */
+               /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
+               dpll |= 3;
+       else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+               dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
+       else
+               dpll |= PLL_REF_INPUT_DREFCLK;
+
+       return dpll;
+}
+
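The P1 post divider in ironlake_compute_dpll() above is programmed one-hot: a divider of n sets bit (n - 1) in both the FPA0 and FPA1 fields. For example, with clock->p1 == 2:

    /* (1 << (2 - 1)) = 0x2, placed in each P1 field */
    dpll |= 0x2 << DPLL_FPA01_P1_POST_DIV_SHIFT;
    dpll |= 0x2 << DPLL_FPA1_P1_POST_DIV_SHIFT;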
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+                                 struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode,
+                                 int x, int y,
+                                 struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       int num_connectors = 0;
+       intel_clock_t clock, reduced_clock;
+       u32 dpll, fp = 0, fp2 = 0;
+       bool ok, has_reduced_clock = false;
+       bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+       struct intel_encoder *encoder;
+       u32 temp;
+       int ret;
+       bool dither, fdi_config_ok;
+
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_DISPLAYPORT:
+                       is_dp = true;
+                       break;
+               case INTEL_OUTPUT_EDP:
+                       is_dp = true;
+                       if (!intel_encoder_is_pch_edp(&encoder->base))
+                               is_cpu_edp = true;
+                       break;
+               }
+
+               num_connectors++;
+       }
+
+       WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+            "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+       ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+                                    &has_reduced_clock, &reduced_clock);
+       if (!ok) {
+               DRM_ERROR("Couldn't find PLL settings for mode!\n");
+               return -EINVAL;
+       }
+
+       /* Ensure that the cursor is valid for the new mode before changing... */
+       intel_crtc_update_cursor(crtc, true);
+
+       /* determine panel color depth */
+       dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+                                             adjusted_mode);
+       if (is_lvds && dev_priv->lvds_dither)
+               dither = true;
+
+       fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+       if (has_reduced_clock)
+               fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+                       reduced_clock.m2;
+
+       dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
 
        DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
        drm_mode_debug_printmodeline(mode);
 
-       /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
-        * pre-Haswell/LPT generation */
-       if (HAS_PCH_LPT(dev)) {
-               DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
-                               pipe);
-       } else if (!is_cpu_edp) {
+       /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+       if (!is_cpu_edp) {
                struct intel_pch_pll *pll;
 
                pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               /* the chip adds 2 halflines automatically */
-               adjusted_mode->crtc_vtotal -= 1;
-               adjusted_mode->crtc_vblank_end -= 1;
-               I915_WRITE(VSYNCSHIFT(pipe),
-                          adjusted_mode->crtc_hsync_start
-                          - adjusted_mode->crtc_htotal/2);
-       } else {
-               I915_WRITE(VSYNCSHIFT(pipe), 0);
-       }
-
-       I915_WRITE(HTOTAL(pipe),
-                  (adjusted_mode->crtc_hdisplay - 1) |
-                  ((adjusted_mode->crtc_htotal - 1) << 16));
-       I915_WRITE(HBLANK(pipe),
-                  (adjusted_mode->crtc_hblank_start - 1) |
-                  ((adjusted_mode->crtc_hblank_end - 1) << 16));
-       I915_WRITE(HSYNC(pipe),
-                  (adjusted_mode->crtc_hsync_start - 1) |
-                  ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-       I915_WRITE(VTOTAL(pipe),
-                  (adjusted_mode->crtc_vdisplay - 1) |
-                  ((adjusted_mode->crtc_vtotal - 1) << 16));
-       I915_WRITE(VBLANK(pipe),
-                  (adjusted_mode->crtc_vblank_start - 1) |
-                  ((adjusted_mode->crtc_vblank_end - 1) << 16));
-       I915_WRITE(VSYNC(pipe),
-                  (adjusted_mode->crtc_vsync_start - 1) |
-                  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+       intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-       /* pipesrc controls the size that is scaled from, which should
-        * always be the user's requested size.
-        */
-       I915_WRITE(PIPESRC(pipe),
-                  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+       /* Note, this also computes intel_crtc->fdi_lanes which is used below in
+        * ironlake_check_fdi_lanes. */
+       ironlake_set_m_n(crtc, mode, adjusted_mode);
 
-       I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-       I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-       I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-       I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+       fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
        if (is_cpu_edp)
                ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
+       return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+                                struct drm_display_mode *mode,
+                                struct drm_display_mode *adjusted_mode,
+                                int x, int y,
+                                struct drm_framebuffer *fb)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
+       int num_connectors = 0;
+       intel_clock_t clock, reduced_clock;
+       u32 dpll = 0, fp = 0, fp2 = 0;
+       bool ok, has_reduced_clock = false;
+       bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+       struct intel_encoder *encoder;
+       u32 temp;
+       int ret;
+       bool dither;
+
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_LVDS:
+                       is_lvds = true;
+                       break;
+               case INTEL_OUTPUT_DISPLAYPORT:
+                       is_dp = true;
+                       break;
+               case INTEL_OUTPUT_EDP:
+                       is_dp = true;
+                       if (!intel_encoder_is_pch_edp(&encoder->base))
+                               is_cpu_edp = true;
+                       break;
+               }
+
+               num_connectors++;
+       }
+
+       if (is_cpu_edp)
+               intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+       else
+               intel_crtc->cpu_transcoder = pipe;
+
+       /* We are not sure yet that this won't happen. */
+       WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+            INTEL_PCH_TYPE(dev));
+
+       WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+            num_connectors, pipe_name(pipe));
+
+       WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+               (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+       WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+       if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+               return -EINVAL;
+
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+               ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+                                            &has_reduced_clock,
+                                            &reduced_clock);
+               if (!ok) {
+                       DRM_ERROR("Couldn't find PLL settings for mode!\n");
+                       return -EINVAL;
+               }
+       }
+
+       /* Ensure that the cursor is valid for the new mode before changing... */
+       intel_crtc_update_cursor(crtc, true);
+
+       /* determine panel color depth */
+       dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+                                             adjusted_mode);
+       if (is_lvds && dev_priv->lvds_dither)
+               dither = true;
+
+       DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+       drm_mode_debug_printmodeline(mode);
+
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+               fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+               if (has_reduced_clock)
+                       fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+                             reduced_clock.m2;
+
+               dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+                                            fp);
+
+               /* CPU eDP is the only output that doesn't need a PCH PLL of its
+                * own on pre-Haswell/LPT generation */
+               if (!is_cpu_edp) {
+                       struct intel_pch_pll *pll;
+
+                       pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+                       if (pll == NULL) {
+                               DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+                                                pipe);
+                               return -EINVAL;
+                       }
+               } else
+                       intel_put_pch_pll(intel_crtc);
+
+               /* The LVDS pin pair needs to be on before the DPLLs are
+                * enabled.  This is an exception to the general rule that
+                * mode_set doesn't turn things on.
+                */
+               if (is_lvds) {
+                       temp = I915_READ(PCH_LVDS);
+                       temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+                       if (HAS_PCH_CPT(dev)) {
+                               temp &= ~PORT_TRANS_SEL_MASK;
+                               temp |= PORT_TRANS_SEL_CPT(pipe);
+                       } else {
+                               if (pipe == 1)
+                                       temp |= LVDS_PIPEB_SELECT;
+                               else
+                                       temp &= ~LVDS_PIPEB_SELECT;
+                       }
+
+                       /* set the corresponding LVDS_BORDER bit */
+                       temp |= dev_priv->lvds_border_bits;
+                       /* Set the B0-B3 data pairs corresponding to whether
+                        * we're going to set the DPLLs for dual-channel mode or
+                        * not.
+                        */
+                       if (clock.p2 == 7)
+                               temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+                       else
+                               temp &= ~(LVDS_B0B3_POWER_UP |
+                                         LVDS_CLKB_POWER_UP);
+
+                       /* It would be nice to set 24 vs 18-bit mode
+                        * (LVDS_A3_POWER_UP) appropriately here, but we need to
+                        * look more thoroughly into how panels behave in the
+                        * two modes.
+                        */
+                       temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               temp |= LVDS_HSYNC_POLARITY;
+                       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               temp |= LVDS_VSYNC_POLARITY;
+                       I915_WRITE(PCH_LVDS, temp);
+               }
+       }
+
+       if (is_dp && !is_cpu_edp) {
+               intel_dp_set_m_n(crtc, mode, adjusted_mode);
+       } else {
+               if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+                       /* For non-DP output, clear any trans DP clock recovery
+                        * setting. */
+                       I915_WRITE(TRANSDATA_M1(pipe), 0);
+                       I915_WRITE(TRANSDATA_N1(pipe), 0);
+                       I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+                       I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+               }
+       }
+
+       intel_crtc->lowfreq_avail = false;
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+               if (intel_crtc->pch_pll) {
+                       I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+                       /* Wait for the clocks to stabilize. */
+                       POSTING_READ(intel_crtc->pch_pll->pll_reg);
+                       udelay(150);
+
+                       /* The pixel multiplier can only be updated once the
+                        * DPLL is enabled and the clocks are stable.
+                        *
+                        * So write it again.
+                        */
+                       I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+               }
+
+               if (intel_crtc->pch_pll) {
+                       if (is_lvds && has_reduced_clock && i915_powersave) {
+                               I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+                               intel_crtc->lowfreq_avail = true;
+                       } else {
+                               I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+                       }
+               }
+       }
+
+       intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+       if (!is_dp || is_cpu_edp)
+               ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+               if (is_cpu_edp)
+                       ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+       haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+       /* Set up the display plane register */
+       I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+       POSTING_READ(DSPCNTR(plane));
+
+       ret = intel_pipe_set_base(crtc, x, y, fb);
+
+       intel_update_watermarks(dev);
+
+       intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
        return ret;
 }
 
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
                                              x, y, fb);
        drm_vblank_post_modeset(dev, pipe);
 
-       return ret;
+       if (ret != 0)
+               return ret;
+
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+                       encoder->base.base.id,
+                       drm_get_encoder_name(&encoder->base),
+                       mode->base.id, mode->name);
+               encoder_funcs = encoder->base.helper_private;
+               encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+       }
+
+       return 0;
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
                                  int depth, int bpp)
 {
        struct drm_i915_gem_object *obj;
-       struct drm_mode_fb_cmd2 mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
        obj = i915_gem_alloc_object(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-               goto fail;
+               return false;
        }
 
        if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
-               goto fail;
+               return false;
        }
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
-
        return true;
-fail:
-       connector->encoder = NULL;
-       encoder->crtc = NULL;
-       return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
        struct drm_display_mode *mode;
-       int htot = I915_READ(HTOTAL(pipe));
-       int hsync = I915_READ(HSYNC(pipe));
-       int vtot = I915_READ(VTOTAL(pipe));
-       int vsync = I915_READ(VSYNC(pipe));
+       int htot = I915_READ(HTOTAL(cpu_transcoder));
+       int hsync = I915_READ(HSYNC(cpu_transcoder));
+       int vtot = I915_READ(VTOTAL(cpu_transcoder));
+       int vsync = I915_READ(VSYNC(cpu_transcoder));
 
        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
        struct intel_unpin_work *work =
                container_of(__work, struct intel_unpin_work, work);
+       struct drm_device *dev = work->crtc->dev;
 
-       mutex_lock(&work->dev->struct_mutex);
+       mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb_obj);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);
 
-       intel_update_fbc(work->dev);
-       mutex_unlock(&work->dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+       atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
        kfree(work);
 }
 
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        struct drm_i915_gem_object *obj;
-       struct drm_pending_vblank_event *e;
-       struct timeval tvbl;
        unsigned long flags;
 
        /* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
-       if (work == NULL || !work->pending) {
+
+       /* Ensure we don't miss a work->pending update ... */
+       smp_rmb();
+
+       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
 
-       intel_crtc->unpin_work = NULL;
-
-       if (work->event) {
-               e = work->event;
-               e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+       /* and that the unpin work is consistent wrt ->pending. */
+       smp_rmb();
 
-               e->event.tv_sec = tvbl.tv_sec;
-               e->event.tv_usec = tvbl.tv_usec;
+       intel_crtc->unpin_work = NULL;
 
-               list_add_tail(&e->base.link,
-                             &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (work->event)
+               drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
        drm_vblank_put(dev, intel_crtc->pipe);
 
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        atomic_clear_mask(1 << intel_crtc->plane,
                          &obj->pending_flip.counter);
-
        wake_up(&dev_priv->pending_flip_queue);
-       schedule_work(&work->work);
+
+       queue_work(dev_priv->wq, &work->work);
 
        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
+       /* NB: An MMIO update of the plane base pointer will also
+        * generate a page-flip completion irq, i.e. every modeset
+        * is also accompanied by a spurious intel_prepare_page_flip().
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work) {
-               if ((++intel_crtc->unpin_work->pending) > 1)
-                       DRM_ERROR("Prepared flip multiple times\n");
-       } else {
-               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-       }
+       if (intel_crtc->unpin_work)
+               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+       /* Ensure that the work item is consistent when activating it ... */
+       smp_wmb();
+       atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+       /* and that it is marked active as soon as the irq could fire. */
+       smp_wmb();
+}
+
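The smp_wmb() pair above matches the smp_rmb() pair added to do_intel_finish_page_flip(), so the flip-done interrupt can never observe INTEL_FLIP_PENDING while the work item is still half-written. Schematically, a sketch of the ordering rather than the driver's exact code:

    /*
     * Writer (intel_mark_page_flip_active):
     *   1. populate the intel_unpin_work item
     *   2. smp_wmb()
     *   3. atomic_set(&work->pending, INTEL_FLIP_PENDING)
     *
     * Reader (do_intel_finish_page_flip):
     *   1. work = intel_crtc->unpin_work; smp_rmb()
     *   2. bail unless atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE
     *   3. smp_rmb(), and only then dereference the work fields
     */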
 static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                return -ENOMEM;
 
        work->event = event;
-       work->dev = crtc->dev;
+       work->crtc = crtc;
        intel_fb = to_intel_framebuffer(crtc->fb);
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
 
+       if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+               flush_workqueue(dev_priv->wq);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+       atomic_inc(&intel_crtc->unpin_work_count);
 
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
        if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        return 0;
 
 cleanup_pending:
+       atomic_dec(&intel_crtc->unpin_work_count);
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
                                dev->mode_config.dpms_property;
 
                        connector->dpms = DRM_MODE_DPMS_ON;
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         dpms_property,
                                                         DRM_MODE_DPMS_ON);
 
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
-       struct drm_encoder_helper_funcs *encoder_funcs;
-       struct drm_encoder *encoder;
        struct intel_crtc *intel_crtc;
        unsigned disable_pipes, prepare_pipes, modeset_pipes;
        bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
         * update the output configuration. */
        intel_modeset_update_state(dev, prepare_pipes);
 
+       if (dev_priv->display.modeset_global_resources)
+               dev_priv->display.modeset_global_resources(dev);
+
        /* Set up the DPLL and any encoders state that needs to adjust or depend
         * on the DPLL.
         */
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
                                           x, y, fb);
                if (!ret)
                    goto done;
-
-               list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
-                       if (encoder->crtc != &intel_crtc->base)
-                               continue;
-
-                       DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
-                               encoder->base.id, drm_get_encoder_name(encoder),
-                               mode->base.id, mode->name);
-                       encoder_funcs = encoder->helper_private;
-                       encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-               }
        }
 
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7420,6 +8305,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
        .page_flip = intel_crtc_page_flip,
 };
 
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+       if (IS_HASWELL(dev))
+               intel_ddi_pll_init(dev);
+}
+
 static void intel_pch_pll_init(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8350,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        /* Swap pipes & planes for FBC on pre-965 */
        intel_crtc->pipe = pipe;
        intel_crtc->plane = pipe;
+       intel_crtc->cpu_transcoder = pipe;
        if (IS_MOBILE(dev) && IS_GEN3(dev)) {
                DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
                intel_crtc->plane = !pipe;
@@ -7551,17 +8443,9 @@ static void intel_setup_outputs(struct drm_device *dev)
                I915_WRITE(PFIT_CONTROL, 0);
        }
 
-       if (HAS_PCH_SPLIT(dev)) {
-               dpd_is_edp = intel_dpd_is_edp(dev);
-
-               if (has_edp_a(dev))
-                       intel_dp_init(dev, DP_A, PORT_A);
-
-               if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-                       intel_dp_init(dev, PCH_DP_D, PORT_D);
-       }
-
-       intel_crt_init(dev);
+       if (!(IS_HASWELL(dev) &&
+             (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+               intel_crt_init(dev);
 
        if (IS_HASWELL(dev)) {
                int found;
@@ -7584,6 +8468,10 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_ddi_init(dev, PORT_D);
        } else if (HAS_PCH_SPLIT(dev)) {
                int found;
+               dpd_is_edp = intel_dpd_is_edp(dev);
+
+               if (has_edp_a(dev))
+                       intel_dp_init(dev, DP_A, PORT_A);
 
                if (I915_READ(HDMIB) & PORT_DETECTED) {
                        /* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8491,15 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(PCH_DP_C) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_C, PORT_C);
 
-               if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+               if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev)) {
                int found;
 
+               /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+               if (I915_READ(DP_C) & DP_DETECTED)
+                       intel_dp_init(dev, DP_C, PORT_C);
+
                if (I915_READ(SDVOB) & PORT_DETECTED) {
                        /* SDVOB multiplex with HDMIB */
                        found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8512,6 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(SDVOC) & PORT_DETECTED)
                        intel_hdmi_init(dev, SDVOC, PORT_C);
 
-               /* Shares lanes with HDMI on SDVOC */
-               if (I915_READ(DP_C) & DP_DETECTED)
-                       intel_dp_init(dev, DP_C, PORT_C);
        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;
 
@@ -7676,8 +8565,9 @@ static void intel_setup_outputs(struct drm_device *dev)
                        intel_encoder_clones(encoder);
        }
 
-       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-               ironlake_init_pch_refclk(dev);
+       intel_init_pch_refclk(dev);
+
+       drm_helper_move_panel_connectors_to_head(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7718,27 +8608,51 @@ int intel_framebuffer_init(struct drm_device *dev,
        if (mode_cmd->pitches[0] & 63)
                return -EINVAL;
 
+       /* FIXME <= Gen4 stride limits are a bit unclear */
+       if (mode_cmd->pitches[0] > 32768)
+               return -EINVAL;
+
+       if (obj->tiling_mode != I915_TILING_NONE &&
+           mode_cmd->pitches[0] != obj->stride)
+               return -EINVAL;
+
+       /* Reject formats not supported by any plane early. */
        switch (mode_cmd->pixel_format) {
-       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_C8:
        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
        case DRM_FORMAT_ARGB8888:
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
+               if (INTEL_INFO(dev)->gen > 3)
+                       return -EINVAL;
+               break;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
        case DRM_FORMAT_XRGB2101010:
        case DRM_FORMAT_ARGB2101010:
-               /* RGB formats are common across chipsets */
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_ABGR2101010:
+               if (INTEL_INFO(dev)->gen < 4)
+                       return -EINVAL;
                break;
        case DRM_FORMAT_YUYV:
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
+               if (INTEL_INFO(dev)->gen < 6)
+                       return -EINVAL;
                break;
        default:
-               DRM_DEBUG_KMS("unsupported pixel format %u\n",
-                               mode_cmd->pixel_format);
+               DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
                return -EINVAL;
        }
 
+       /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+       if (mode_cmd->offsets[0] != 0)
+               return -EINVAL;
+
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
                DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7776,7 +8690,13 @@ static void intel_init_display(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* We always want a DPMS function */
-       if (HAS_PCH_SPLIT(dev)) {
+       if (IS_HASWELL(dev)) {
+               dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+               dev_priv->display.crtc_enable = haswell_crtc_enable;
+               dev_priv->display.crtc_disable = haswell_crtc_disable;
+               dev_priv->display.off = haswell_crtc_off;
+               dev_priv->display.update_plane = ironlake_update_plane;
+       } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8747,8 @@ static void intel_init_display(struct drm_device *dev)
                        /* FIXME: detect B0+ stepping and use auto training */
                        dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
                        dev_priv->display.write_eld = ironlake_write_eld;
+                       dev_priv->display.modeset_global_resources =
+                               ivb_modeset_global_resources;
                } else if (IS_HASWELL(dev)) {
                        dev_priv->display.fdi_link_train = hsw_fdi_link_train;
                        dev_priv->display.write_eld = haswell_write_eld;
@@ -8058,6 +8980,7 @@ void intel_modeset_init(struct drm_device *dev)
                        DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
        }
 
+       intel_cpu_pll_init(dev);
        intel_pch_pll_init(dev);
 
        /* Just disable it once at startup */
@@ -8127,7 +9050,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        u32 reg;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
-       reg = PIPECONF(crtc->pipe);
+       reg = PIPECONF(crtc->cpu_transcoder);
        I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
        /* We need to sanitize the plane -> pipe mapping first because this will
@@ -8246,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                 bool force_restore)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
@@ -8255,10 +9179,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
        struct intel_encoder *encoder;
        struct intel_connector *connector;
 
+       if (IS_HASWELL(dev)) {
+               tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+               if (tmp & TRANS_DDI_FUNC_ENABLE) {
+                       switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+                       case TRANS_DDI_EDP_INPUT_A_ON:
+                       case TRANS_DDI_EDP_INPUT_A_ONOFF:
+                               pipe = PIPE_A;
+                               break;
+                       case TRANS_DDI_EDP_INPUT_B_ONOFF:
+                               pipe = PIPE_B;
+                               break;
+                       case TRANS_DDI_EDP_INPUT_C_ONOFF:
+                               pipe = PIPE_C;
+                               break;
+                       }
+
+                       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+                       crtc->cpu_transcoder = TRANSCODER_EDP;
+
+                       DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+                                     pipe_name(pipe));
+               }
+       }
+
        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-               tmp = I915_READ(PIPECONF(pipe));
+               tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
                if (tmp & PIPECONF_ENABLE)
                        crtc->active = true;
                else
@@ -8271,6 +9220,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
                              crtc->active ? "enabled" : "disabled");
        }
 
+       if (IS_HASWELL(dev))
+               intel_ddi_setup_hw_pll_state(dev);
+
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
                pipe = 0;
@@ -8317,9 +9269,19 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
                intel_sanitize_crtc(crtc);
        }
 
-       intel_modeset_update_staged_output_state(dev);
+       if (force_restore) {
+               for_each_pipe(pipe) {
+                       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+                       intel_set_mode(&crtc->base, &crtc->base.mode,
+                                      crtc->base.x, crtc->base.y, crtc->base.fb);
+               }
+       } else {
+               intel_modeset_update_staged_output_state(dev);
+       }
 
        intel_modeset_check_state(dev);
+
+       drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
        intel_setup_overlay(dev);
 
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, false);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -8447,6 +9409,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
+       enum transcoder cpu_transcoder;
        int i;
 
        error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9417,8 @@ intel_display_capture_error_state(struct drm_device *dev)
                return NULL;
 
        for_each_pipe(i) {
+               cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
                error->cursor[i].control = I915_READ(CURCNTR(i));
                error->cursor[i].position = I915_READ(CURPOS(i));
                error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9433,14 @@ intel_display_capture_error_state(struct drm_device *dev)
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
 
-               error->pipe[i].conf = I915_READ(PIPECONF(i));
+               error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->pipe[i].source = I915_READ(PIPESRC(i));
-               error->pipe[i].htotal = I915_READ(HTOTAL(i));
-               error->pipe[i].hblank = I915_READ(HBLANK(i));
-               error->pipe[i].hsync = I915_READ(HSYNC(i));
-               error->pipe[i].vtotal = I915_READ(VTOTAL(i));
-               error->pipe[i].vblank = I915_READ(VBLANK(i));
-               error->pipe[i].vsync = I915_READ(VSYNC(i));
+               error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+               error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+               error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+               error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+               error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+               error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }
 
        return error;
index 368ed8e..1b63d55 100644 (file)
@@ -36,8 +36,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DP_RECEIVER_CAP_SIZE   0xf
-#define DP_LINK_STATUS_SIZE    6
 #define DP_LINK_CHECK_TIMEOUT  (10 * 1000)
 
 /**
@@ -49,7 +47,9 @@
  */
 static bool is_edp(struct intel_dp *intel_dp)
 {
-       return intel_dp->base.type == INTEL_OUTPUT_EDP;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+       return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
 /**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
        return is_edp(intel_dp) && !is_pch_edp(intel_dp);
 }
 
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 {
-       return container_of(encoder, struct intel_dp, base.base);
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+       return intel_dig_port->base.base.dev;
 }
 
 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_dp, base);
+       return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 }
 
 /**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
        return is_pch_edp(intel_dp);
 }
 
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 void
 intel_edp_link_config(struct intel_encoder *intel_encoder,
                       int *lane_num, int *link_bw)
 {
-       struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
        *lane_num = intel_dp->lane_count;
-       if (intel_dp->link_bw == DP_LINK_BW_1_62)
-               *link_bw = 162000;
-       else if (intel_dp->link_bw == DP_LINK_BW_2_7)
-               *link_bw = 270000;
+       *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 }
 
 int
 intel_edp_target_clock(struct intel_encoder *intel_encoder,
                       struct drm_display_mode *mode)
 {
-       struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
 
-       if (intel_dp->panel_fixed_mode)
-               return intel_dp->panel_fixed_mode->clock;
+       if (intel_connector->panel.fixed_mode)
+               return intel_connector->panel.fixed_mode->clock;
        else
                return mode->clock;
 }
 
 static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
-       int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-       switch (max_lane_count) {
-       case 1: case 2: case 4:
-               break;
-       default:
-               max_lane_count = 4;
-       }
-       return max_lane_count;
-}
-
-static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
                          bool adjust_mode)
 {
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-       int max_lanes = intel_dp_max_lane_count(intel_dp);
+       int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
        int max_rate, mode_rate;
 
        mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
-       if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
-               if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+       if (is_edp(intel_dp) && fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
 
-               if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+               if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;
        }
 
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;
 
+       /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+       if (IS_VALLEYVIEW(dev))
+               return 200;
+
        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
 
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *recv, int recv_size)
 {
        uint32_t output_reg = intel_dp->output_reg;
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = output_reg + 0x10;
        uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
        uint32_t aux_clock_divider;
        int try, precharge;
 
+       if (IS_HASWELL(dev)) {
+               switch (intel_dig_port->port) {
+               case PORT_A:
+                       ch_ctl = DPA_AUX_CH_CTL;
+                       ch_data = DPA_AUX_CH_DATA1;
+                       break;
+               case PORT_B:
+                       ch_ctl = PCH_DPB_AUX_CH_CTL;
+                       ch_data = PCH_DPB_AUX_CH_DATA1;
+                       break;
+               case PORT_C:
+                       ch_ctl = PCH_DPC_AUX_CH_CTL;
+                       ch_data = PCH_DPC_AUX_CH_DATA1;
+                       break;
+               case PORT_D:
+                       ch_ctl = PCH_DPD_AUX_CH_CTL;
+                       ch_data = PCH_DPD_AUX_CH_DATA1;
+                       break;
+               default:
+                       BUG();
+               }
+       }
+
        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_GEN6(dev) || IS_GEN7(dev))
+               if (IS_HASWELL(dev))
+                       aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+               else if (IS_VALLEYVIEW(dev))
+                       aux_clock_divider = 100;
+               else if (IS_GEN6(dev) || IS_GEN7(dev))
                        aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
-               aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+               aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        else
                aux_clock_divider = intel_hrawclk(dev) / 2;
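The AUX channel targets a bit clock of roughly 2 MHz, so the divider is simply the source clock in MHz halved. The hard-coded Valleyview value above is consistent with that rule: VLV's hrawclk is fixed at 200 MHz, and

    aux_clock_divider = 200 / 2 = 100;   /* 200 MHz / 100 = 2 MHz */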
 
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
        return -EREMOTEIO;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
 static int
 intel_dp_i2c_init(struct intel_dp *intel_dp,
                  struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
        return ret;
 }
 
-static bool
+bool
 intel_dp_mode_fixup(struct drm_encoder *encoder,
                    const struct drm_display_mode *mode,
                    struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = encoder->dev;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
-       int max_lane_count = intel_dp_max_lane_count(intel_dp);
+       int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
        int bpp, mode_rate;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
-       if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
-               intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
-               intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
+       if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+               intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+                                      adjusted_mode);
+               intel_pch_panel_fitting(dev,
+                                       intel_connector->panel.fitting_mode,
                                        mode, adjusted_mode);
        }
 
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = crtc->dev;
-       struct intel_encoder *encoder;
+       struct intel_encoder *intel_encoder;
+       struct intel_dp *intel_dp;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int lane_count = 4;
        struct intel_dp_m_n m_n;
        int pipe = intel_crtc->pipe;
+       enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
        /*
         * Find the lane count in the intel_encoder private
         */
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
-               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
-                   intel_dp->base.type == INTEL_OUTPUT_EDP)
+               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+                   intel_encoder->type == INTEL_OUTPUT_EDP)
                {
                        lane_count = intel_dp->lane_count;
                        break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
        intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
                             mode->clock, adjusted_mode->clock, &m_n);
 
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(TRANSDATA_M1(pipe),
-                          ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-                          m_n.gmch_m);
+       if (IS_HASWELL(dev)) {
+               I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+                          TU_SIZE(m_n.tu) | m_n.gmch_m);
+               I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+               I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+               I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+       } else if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
                I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
                I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
                I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+       } else if (IS_VALLEYVIEW(dev)) {
+               I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+               I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+               I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+               I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
        } else {
                I915_WRITE(PIPE_GMCH_DATA_M(pipe),
-                          ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-                          m_n.gmch_m);
+                          TU_SIZE(m_n.tu) | m_n.gmch_m);
                I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
                I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
                I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
        }
 }
 
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+       memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+       intel_dp->link_configuration[0] = intel_dp->link_bw;
+       intel_dp->link_configuration[1] = intel_dp->lane_count;
+       intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+       /*
+        * Check for DPCD version > 1.1 and enhanced framing support
+        */
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+           (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+               intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+       }
+}
+
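For reference, the nine bytes built by intel_dp_init_link_config() mirror the sink's DPCD link-configuration registers starting at DP_LINK_BW_SET (0x100), which is where intel_dp_start_link_train() writes them. A sketch of the resulting buffer for a hypothetical 2.7GHz, 4-lane link with enhanced framing (values follow from the standard DP defines):

    /* Illustrative contents only:
     *   [0] = DP_LINK_BW_2_7                      = 0x0a  link rate
     *   [1] = 4 | DP_LANE_COUNT_ENHANCED_FRAME_EN = 0x84  lanes + framing
     *   [8] = DP_SET_ANSI_8B10B                   = 0x01  channel coding */
    uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE] = {
            0x0a, 0x84, 0, 0, 0, 0, 0, 0, 0x01,
    };
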
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-       struct drm_crtc *crtc = intel_dp->base.base.crtc;
+       struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        /*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(encoder, adjusted_mode);
        }
-       memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-       intel_dp->link_configuration[0] = intel_dp->link_bw;
-       intel_dp->link_configuration[1] = intel_dp->lane_count;
-       intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-       /*
-        * Check for DPCD version > 1.1 and enhanced framing support
-        */
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-               intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-       }
+
+       intel_dp_init_link_config(intel_dp);
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
        return control;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
 
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
 static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
 
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
 {
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        mutex_lock(&dev->mode_config.mutex);
        ironlake_panel_vdd_off_sync(intel_dp);
        mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
        if (!is_edp(intel_dp))
                return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
        }
 }
 
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
 
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
        }
 }
 
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
 
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
        ironlake_wait_panel_off(intel_dp);
 }
 
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
        u32 pp;
 
        if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
        pp |= EDP_BLC_ENABLE;
        I915_WRITE(PCH_PP_CONTROL, pp);
        POSTING_READ(PCH_PP_CONTROL);
+
+       intel_panel_enable_backlight(dev, pipe);
 }
 
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
 
        if (!is_edp(intel_dp))
                return;
 
+       intel_panel_disable_backlight(dev);
+
        DRM_DEBUG_KMS("\n");
        pp = ironlake_get_pp_control(dev_priv);
        pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
-       struct drm_crtc *crtc = intel_dp->base.base.crtc;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;
 
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
-       struct drm_crtc *crtc = intel_dp->base.base.crtc;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;
 
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 }
 
 /* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
        int ret, i;
 
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                return true;
                        }
                }
-       }
 
-       DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+               DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+                             intel_dp->output_reg);
+       }
 
        return true;
 }
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                              DP_LINK_STATUS_SIZE);
 }
 
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-                    int r)
-{
-       return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
-                                int lane)
-{
-       int         s = ((lane & 1) ?
-                        DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
-                        DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-       uint8_t l = adjust_request[lane>>1];
-
-       return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
-                                     int lane)
-{
-       int         s = ((lane & 1) ?
-                        DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
-                        DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-       uint8_t l = adjust_request[lane>>1];
-
-       return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
 #if 0
 static char    *voltage_names[] = {
        "0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char       *link_train_names[] = {
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
        if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
                return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 static uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-       if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+       if (IS_HASWELL(dev)) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_9_5;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_1200:
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
+       } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
-       uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
        uint8_t voltage_max;
        uint8_t preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
-               uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
-               uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+               uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+               uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
                if (this_v > v)
                        v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
        }
 }
 
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-                     int lane)
-{
-       int s = (lane & 1) * 4;
-       uint8_t l = link_status[lane>>1];
-
-       return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
 {
-       int lane;
-       uint8_t lane_status;
-
-       for (lane = 0; lane < lane_count; lane++) {
-               lane_status = intel_get_lane_status(link_status, lane);
-               if ((lane_status & DP_LANE_CR_DONE) == 0)
-                       return false;
-       }
-       return true;
-}
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       switch (signal_levels) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return DDI_BUF_EMP_400MV_0DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return DDI_BUF_EMP_400MV_3_5DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return DDI_BUF_EMP_400MV_6DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+               return DDI_BUF_EMP_400MV_9_5DB_HSW;
 
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
-                        DP_LANE_CHANNEL_EQ_DONE|\
-                        DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
-       uint8_t lane_align;
-       uint8_t lane_status;
-       int lane;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+               return DDI_BUF_EMP_600MV_0DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return DDI_BUF_EMP_600MV_3_5DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+               return DDI_BUF_EMP_600MV_6DB_HSW;
 
-       lane_align = intel_dp_link_status(link_status,
-                                         DP_LANE_ALIGN_STATUS_UPDATED);
-       if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
-               return false;
-       for (lane = 0; lane < intel_dp->lane_count; lane++) {
-               lane_status = intel_get_lane_status(link_status, lane);
-               if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
-                       return false;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return DDI_BUF_EMP_800MV_0DB_HSW;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return DDI_BUF_EMP_800MV_3_5DB_HSW;
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+                             "0x%x\n", signal_levels);
+               return DDI_BUF_EMP_400MV_0DB_HSW;
        }
-       return true;
 }
 
 static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                        uint32_t dp_reg_value,
                        uint8_t dp_train_pat)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
        int ret;
+       uint32_t temp;
 
-       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+       if (IS_HASWELL(dev)) {
+               temp = I915_READ(DP_TP_CTL(port));
+
+               if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+                       temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+               else
+                       temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+               temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+               switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+               case DP_TRAINING_PATTERN_DISABLE:
+                       temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+                       I915_WRITE(DP_TP_CTL(port), temp);
+
+                       if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+                                     DP_TP_STATUS_IDLE_DONE), 1))
+                               DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+                       temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+                       temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+                       break;
+               case DP_TRAINING_PATTERN_1:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+                       break;
+               case DP_TRAINING_PATTERN_2:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+                       break;
+               case DP_TRAINING_PATTERN_3:
+                       temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+                       break;
+               }
+               I915_WRITE(DP_TP_CTL(port), temp);
+
+       } else if (HAS_PCH_CPT(dev) &&
+                  (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 }
 
 /* Enable corresponding port and start training pattern 1 */
-static void
+void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+       struct drm_device *dev = encoder->dev;
        int i;
        uint8_t voltage;
        bool clock_recovery = false;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
 
+       if (IS_HASWELL(dev))
+               intel_ddi_prepare_link_retrain(encoder);
+
        /* Write the link configuration data */
        intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
                                  intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
 
-
-               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               if (IS_HASWELL(dev)) {
+                       signal_levels = intel_dp_signal_levels_hsw(
+                                                       intel_dp->train_set[0]);
+                       DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+               } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
                        signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
-                       DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
+               DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+                             signal_levels);
 
+               /* Set training pattern 1 */
                if (!intel_dp_set_link_train(intel_dp, DP,
                                             DP_TRAINING_PATTERN_1 |
                                             DP_LINK_SCRAMBLING_DISABLE))
                        break;
-               /* Set training pattern 1 */
 
-               udelay(100);
+               drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
                        DRM_ERROR("failed to get link status\n");
                        break;
                }
 
-               if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+               if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        DRM_DEBUG_KMS("clock recovery OK\n");
                        clock_recovery = true;
                        break;
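
The drm_dp_link_train_clock_recovery_delay() call that replaces the old udelay(100) above honours the sink's advertised AUX read interval instead of assuming the spec minimum. Roughly, the drm helper behaves like this sketch (a paraphrase of the helper's behaviour, not part of the patch):

    /* Wait the interval the receiver requested via its DPCD, falling
     * back to the 100us spec minimum when the field is zero. */
    if (intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
            udelay(100);
    else
            mdelay(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);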
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        intel_dp->DP = DP;
 }
 
-static void
+void
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        bool channel_eq = false;
        int tries, cr_tries;
        uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               if (IS_HASWELL(dev)) {
+                       signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+                       DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+               } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
                } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                                             DP_LINK_SCRAMBLING_DISABLE))
                        break;
 
-               udelay(400);
+               drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status))
                        break;
 
                /* Make sure clock is still ok */
-               if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+               if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
                        cr_tries++;
                        continue;
                }
 
-               if (intel_channel_eq_ok(intel_dp, link_status)) {
+               if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                        channel_eq = true;
                        break;
                }
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
+       if (channel_eq)
+               DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
        intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t DP = intel_dp->DP;
 
+       /*
+        * DDI code has a strict mode set sequence and we should try to respect
+        * it, otherwise we might hang the machine in many different ways. So we
+        * really should be disabling the port only on a complete crtc_disable
+        * sequence. This function is only called under two conditions in the
+        * DDI code:
+        * - Link training failed while doing crtc_enable, in which case we
+        *   really should respect the mode set sequence and wait for a
+        *   crtc_disable.
+        * - Someone turned the monitor off and intel_dp_check_link_status
+        *   called us. We don't need to disable the whole port in this case, so
+        *   when someone turns the monitor on again,
+        *   intel_ddi_prepare_link_retrain will take care of redoing the link
+        *   train.
+        */
+       if (IS_HASWELL(dev))
+               return;
+
        if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
                return;
 
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
        if (HAS_PCH_IBX(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-               struct drm_crtc *crtc = intel_dp->base.base.crtc;
+               struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 
                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
 intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
        /* NAK by default */
-       intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+       intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
 }
 
 /*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
  *  4. Check link status on receipt of hot-plug interrupt
  */
 
-static void
+void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
        u8 sink_irq_vector;
        u8 link_status[DP_LINK_STATUS_SIZE];
 
-       if (!intel_dp->base.connectors_active)
+       if (!intel_encoder->connectors_active)
                return;
 
-       if (WARN_ON(!intel_dp->base.base.crtc))
+       if (WARN_ON(!intel_encoder->base.crtc))
                return;
 
        /* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                        DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
        }
 
-       if (!intel_channel_eq_ok(intel_dp, link_status)) {
+       if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
                DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
-                             drm_get_encoder_name(&intel_dp->base.base));
+                             drm_get_encoder_name(&intel_encoder->base));
                intel_dp_start_link_train(intel_dp);
                intel_dp_complete_link_train(intel_dp);
        }
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        enum drm_connector_status status;
 
        /* Can't disconnect eDP, but you can close the lid... */
        if (is_edp(intel_dp)) {
-               status = intel_panel_detect(intel_dp->base.base.dev);
+               status = intel_panel_detect(dev);
                if (status == connector_status_unknown)
                        status = connector_status_connected;
                return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 static enum drm_connector_status
 g4x_dp_detect(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit;
 
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 static struct edid *
 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-       struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct edid     *edid;
-       int size;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
 
-       if (is_edp(intel_dp)) {
-               if (!intel_dp->edid)
+       /* use cached edid if we have one */
+       if (intel_connector->edid) {
+               struct edid *edid;
+               int size;
+
+               /* invalid edid */
+               if (IS_ERR(intel_connector->edid))
                        return NULL;
 
-               size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+               size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
                edid = kmalloc(size, GFP_KERNEL);
                if (!edid)
                        return NULL;
 
-               memcpy(edid, intel_dp->edid, size);
+               memcpy(edid, intel_connector->edid, size);
                return edid;
        }
 
-       edid = drm_get_edid(connector, adapter);
-       return edid;
+       return drm_get_edid(connector, adapter);
 }
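
Note the three-state cache that this hunk (and the eDP init path later in this patch) relies on: intel_connector->edid is NULL when nothing was cached, a valid pointer when a good EDID was read at init time, and an ERR_PTR() sentinel when a probe already ran and failed. In sketch form:

    /* Three-state EDID cache (pattern used above):
     *   NULL           -> no cached EDID, probe the DDC bus normally
     *   ERR_PTR(-E...) -> probing already failed once, report no EDID
     *   valid pointer  -> duplicate and return the cached copy */
    if (IS_ERR(intel_connector->edid))
            return NULL;    /* known-bad: don't touch the hardware again */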
 
 static int
 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-       struct intel_dp *intel_dp = intel_attached_dp(connector);
-       int     ret;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
 
-       if (is_edp(intel_dp)) {
-               drm_mode_connector_update_edid_property(connector,
-                                                       intel_dp->edid);
-               ret = drm_add_edid_modes(connector, intel_dp->edid);
-               drm_edid_to_eld(connector,
-                               intel_dp->edid);
-               return intel_dp->edid_mode_count;
+       /* use cached edid if we have one */
+       if (intel_connector->edid) {
+               /* invalid edid */
+               if (IS_ERR(intel_connector->edid))
+                       return 0;
+
+               return intel_connector_update_modes(connector,
+                                                   intel_connector->edid);
        }
 
-       ret = intel_ddc_get_modes(connector, adapter);
-       return ret;
+       return intel_ddc_get_modes(connector, adapter);
 }
 
 
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct drm_device *dev = intel_dp->base.base.dev;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
        struct edid *edid = NULL;
+       char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
        intel_dp->has_audio = false;
 
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        else
                status = g4x_dp_detect(intel_dp);
 
-       DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
-                     intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
-                     intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
-                     intel_dp->dpcd[6], intel_dp->dpcd[7]);
+       hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+                          32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+       DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
        if (status != connector_status_connected)
                return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                }
        }
 
+       if (intel_encoder->type != INTEL_OUTPUT_EDP)
+               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        return connector_status_connected;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
        struct intel_dp *intel_dp = intel_attached_dp(connector);
-       struct drm_device *dev = intel_dp->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_device *dev = connector->dev;
        int ret;
 
        /* We should parse the EDID data and find out if it has an audio sink
         */
 
        ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
-       if (ret) {
-               if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
-                       struct drm_display_mode *newmode;
-                       list_for_each_entry(newmode, &connector->probed_modes,
-                                           head) {
-                               if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
-                                       intel_dp->panel_fixed_mode =
-                                               drm_mode_duplicate(dev, newmode);
-                                       break;
-                               }
-                       }
-               }
+       if (ret)
                return ret;
-       }
 
-       /* if eDP has no EDID, try to use fixed panel mode from VBT */
-       if (is_edp(intel_dp)) {
-               /* initialize panel mode from VBT if available for eDP */
-               if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
-                       intel_dp->panel_fixed_mode =
-                               drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-                       if (intel_dp->panel_fixed_mode) {
-                               intel_dp->panel_fixed_mode->type |=
-                                       DRM_MODE_TYPE_PREFERRED;
-                       }
-               }
-               if (intel_dp->panel_fixed_mode) {
-                       struct drm_display_mode *mode;
-                       mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+       /* if eDP has no EDID, fall back to fixed mode */
+       if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+               struct drm_display_mode *mode;
+               mode = drm_mode_duplicate(dev,
+                                         intel_connector->panel.fixed_mode);
+               if (mode) {
                        drm_mode_probed_add(connector, mode);
                        return 1;
                }
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
                      uint64_t val)
 {
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
-       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
                goto done;
        }
 
+       if (is_edp(intel_dp) &&
+           property == connector->dev->mode_config.scaling_mode_property) {
+               if (val == DRM_MODE_SCALE_NONE) {
+                       DRM_DEBUG_KMS("no scaling not supported\n");
+                       return -EINVAL;
+               }
+
+               if (intel_connector->panel.fitting_mode == val) {
+                       /* the eDP scaling property is not changed */
+                       return 0;
+               }
+               intel_connector->panel.fitting_mode = val;
+
+               goto done;
+       }
+
        return -EINVAL;
 
 done:
-       if (intel_dp->base.base.crtc) {
-               struct drm_crtc *crtc = intel_dp->base.base.crtc;
+       if (intel_encoder->base.crtc) {
+               struct drm_crtc *crtc = intel_encoder->base.crtc;
                intel_set_mode(crtc, &crtc->mode,
                               crtc->x, crtc->y, crtc->fb);
        }
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct intel_dp *intel_dp = intel_attached_dp(connector);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
 
-       if (is_edp(intel_dp))
+       if (!IS_ERR_OR_NULL(intel_connector->edid))
+               kfree(intel_connector->edid);
+
+       if (is_edp(intel_dp)) {
                intel_panel_destroy_backlight(dev);
+               intel_panel_fini(&intel_connector->panel);
+       }
 
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
 
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+       struct intel_dp *intel_dp = &intel_dig_port->dp;
 
        i2c_del_adapter(&intel_dp->adapter);
        drm_encoder_cleanup(encoder);
        if (is_edp(intel_dp)) {
-               kfree(intel_dp->edid);
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                ironlake_panel_vdd_off_sync(intel_dp);
        }
-       kfree(intel_dp);
+       kfree(intel_dig_port);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 static void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-       struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+       struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
        intel_dp_check_link_status(intel_dp);
 }
@@ -2435,13 +2525,14 @@ int
 intel_trans_dp_port_sel(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct intel_encoder *encoder;
+       struct intel_encoder *intel_encoder;
+       struct intel_dp *intel_dp;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder) {
-               struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
-               if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
-                   intel_dp->base.type == INTEL_OUTPUT_EDP)
+               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+                   intel_encoder->type == INTEL_OUTPUT_EDP)
                        return intel_dp->output_reg;
        }
 
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 static void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
+
+       if (is_edp(intel_dp)) {
+               drm_mode_create_scaling_mode_property(connector->dev);
+               drm_object_attach_property(
+                       &connector->base,
+                       connector->dev->mode_config.scaling_mode_property,
+                       DRM_MODE_SCALE_ASPECT);
+               intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+       }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+                                   struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct edp_power_seq cur, vbt, spec, final;
+       u32 pp_on, pp_off, pp_div, pp;
+
+       /* Workaround: Need to write PP_CONTROL with the unlock key as
+        * the very first thing. */
+       pp = ironlake_get_pp_control(dev_priv);
+       I915_WRITE(PCH_PP_CONTROL, pp);
+
+       pp_on = I915_READ(PCH_PP_ON_DELAYS);
+       pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+       pp_div = I915_READ(PCH_PP_DIVISOR);
+
+       /* Pull timing values out of registers */
+       cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+               PANEL_POWER_UP_DELAY_SHIFT;
+
+       cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+               PANEL_LIGHT_ON_DELAY_SHIFT;
+
+       cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+               PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+       cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+               PANEL_POWER_DOWN_DELAY_SHIFT;
+
+       cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+                      PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+       DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+                     cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+       vbt = dev_priv->edp.pps;
+
+       /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+        * our hw here, which are all in 100usec. */
+       spec.t1_t3 = 210 * 10;
+       spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+       spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+       spec.t10 = 500 * 10;
+       /* This one is special and actually in units of 100ms, but zero
+        * based in the hw (so we need to add 100ms). But the sw vbt
+        * table multiplies it by 1000 to make it in units of 100usec,
+        * too. */
+       spec.t11_t12 = (510 + 100) * 10;
+
+       DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+                     vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+       /* Use the max of the register settings and vbt. If both are
+        * unset, fall back to the spec limits. */
+#define assign_final(field)    final.field = (max(cur.field, vbt.field) == 0 ? \
+                                      spec.field : \
+                                      max(cur.field, vbt.field))
+       assign_final(t1_t3);
+       assign_final(t8);
+       assign_final(t9);
+       assign_final(t10);
+       assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field)       (DIV_ROUND_UP(final.field, 10))
+       intel_dp->panel_power_up_delay = get_delay(t1_t3);
+       intel_dp->backlight_on_delay = get_delay(t8);
+       intel_dp->backlight_off_delay = get_delay(t9);
+       intel_dp->panel_power_down_delay = get_delay(t10);
+       intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+       /* And finally store the new values in the power sequencer. */
+       pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+               (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+                (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+       /* Compute the divisor for the pp clock, simply match the Bspec
+        * formula. */
+       pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+                       << PP_REFERENCE_DIVIDER_SHIFT;
+       pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+                       << PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+       /* Haswell doesn't have any port selection bits for the panel
+        * power sequencer any more. */
+       if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+               if (is_cpu_edp(intel_dp))
+                       pp_on |= PANEL_POWER_PORT_DP_A;
+               else
+                       pp_on |= PANEL_POWER_PORT_DP_D;
+       }
+
+       I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+       I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+       I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+       DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+                     intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+                     intel_dp->panel_power_cycle_delay);
+
+       DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+                     intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+       DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+                     I915_READ(PCH_PP_ON_DELAYS),
+                     I915_READ(PCH_PP_OFF_DELAYS),
+                     I915_READ(PCH_PP_DIVISOR));
 }
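
A worked pass through the arithmetic above, using hypothetical values in the hardware's 100us units: suppose the power sequencer registers were never programmed (cur.t1_t3 = 0) but the VBT asks for a 40ms power-up delay (vbt.t1_t3 = 400). Then:

    /* assign_final: max(0, 400) = 400, nonzero, so the spec cap is not
     * needed and final.t1_t3 = 400.
     * get_delay:    DIV_ROUND_UP(400, 10) = 40 -> 40ms power-up delay.
     * Only if both cur and vbt were zero would final.t1_t3 fall back
     * to spec.t1_t3 = 2100, i.e. the 210ms eDP spec limit.
     *
     * The divisor follows the Bspec formula for whatever rawclk the
     * platform reports; with the usual 125MHz PCH rawclk:
     *   pp_div = (100 * 125) / 2 - 1 = 6249 */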
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+                       struct intel_connector *intel_connector)
 {
+       struct drm_connector *connector = &intel_connector->base;
+       struct intel_dp *intel_dp = &intel_dig_port->dp;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
-       struct intel_dp *intel_dp;
-       struct intel_encoder *intel_encoder;
-       struct intel_connector *intel_connector;
+       struct drm_display_mode *fixed_mode = NULL;
+       enum port port = intel_dig_port->port;
        const char *name = NULL;
        int type;
 
-       intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
-       if (!intel_dp)
-               return;
-
-       intel_dp->output_reg = output_reg;
-       intel_dp->port = port;
        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
+       intel_dp->attached_connector = intel_connector;
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-       if (!intel_connector) {
-               kfree(intel_dp);
-               return;
-       }
-       intel_encoder = &intel_dp->base;
-
-       if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+       if (HAS_PCH_SPLIT(dev) && port == PORT_D)
                if (intel_dpd_is_edp(dev))
                        intel_dp->is_pch_edp = true;
 
-       if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+       /*
+        * FIXME: We need to initialize built-in panels before external panels.
+        * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup.
+        */
+       if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+               type = DRM_MODE_CONNECTOR_eDP;
+               intel_encoder->type = INTEL_OUTPUT_EDP;
+       } else if (port == PORT_A || is_pch_edp(intel_dp)) {
                type = DRM_MODE_CONNECTOR_eDP;
                intel_encoder->type = INTEL_OUTPUT_EDP;
        } else {
+               /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+                * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+                * rewrite it.
+                */
                type = DRM_MODE_CONNECTOR_DisplayPort;
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        }
 
-       connector = &intel_connector->base;
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
        connector->polled = DRM_CONNECTOR_POLL_HPD;
-
-       intel_encoder->cloneable = false;
-
-       INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-                         ironlake_panel_vdd_work);
-
-       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
-       drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+       INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+                         ironlake_panel_vdd_work);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_sysfs_connector_add(connector);
 
-       intel_encoder->enable = intel_enable_dp;
-       intel_encoder->pre_enable = intel_pre_enable_dp;
-       intel_encoder->disable = intel_disable_dp;
-       intel_encoder->post_disable = intel_post_disable_dp;
-       intel_encoder->get_hw_state = intel_dp_get_hw_state;
-       intel_connector->get_hw_state = intel_connector_get_hw_state;
+       if (IS_HASWELL(dev))
+               intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+       else
+               intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 
        /* Set up the DDC bus. */
        switch (port) {
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                break;
        }
 
-       /* Cache some DPCD data in the eDP case */
-       if (is_edp(intel_dp)) {
-               struct edp_power_seq    cur, vbt;
-               u32 pp_on, pp_off, pp_div;
-
-               pp_on = I915_READ(PCH_PP_ON_DELAYS);
-               pp_off = I915_READ(PCH_PP_OFF_DELAYS);
-               pp_div = I915_READ(PCH_PP_DIVISOR);
-
-               if (!pp_on || !pp_off || !pp_div) {
-                       DRM_INFO("bad panel power sequencing delays, disabling panel\n");
-                       intel_dp_encoder_destroy(&intel_dp->base.base);
-                       intel_dp_destroy(&intel_connector->base);
-                       return;
-               }
-
-               /* Pull timing values out of registers */
-               cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
-                       PANEL_POWER_UP_DELAY_SHIFT;
-
-               cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
-                       PANEL_LIGHT_ON_DELAY_SHIFT;
-
-               cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
-                       PANEL_LIGHT_OFF_DELAY_SHIFT;
-
-               cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
-                       PANEL_POWER_DOWN_DELAY_SHIFT;
-
-               cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
-                              PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
-
-               DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                             cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
-
-               vbt = dev_priv->edp.pps;
-
-               DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                             vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
-
-#define get_delay(field)       ((max(cur.field, vbt.field) + 9) / 10)
-
-               intel_dp->panel_power_up_delay = get_delay(t1_t3);
-               intel_dp->backlight_on_delay = get_delay(t8);
-               intel_dp->backlight_off_delay = get_delay(t9);
-               intel_dp->panel_power_down_delay = get_delay(t10);
-               intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
-
-               DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
-                             intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
-                             intel_dp->panel_power_cycle_delay);
-
-               DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
-                             intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
-       }
+       if (is_edp(intel_dp))
+               intel_dp_init_panel_power_sequencer(dev, intel_dp);
 
        intel_dp_i2c_init(intel_dp, intel_connector, name);
 
+       /* Cache DPCD and EDID for eDP. */
        if (is_edp(intel_dp)) {
                bool ret;
+               struct drm_display_mode *scan;
                struct edid *edid;
 
                ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                } else {
                        /* if this fails, presume the device is a ghost */
                        DRM_INFO("failed to retrieve link info, disabling eDP\n");
-                       intel_dp_encoder_destroy(&intel_dp->base.base);
-                       intel_dp_destroy(&intel_connector->base);
+                       intel_dp_encoder_destroy(&intel_encoder->base);
+                       intel_dp_destroy(connector);
                        return;
                }
 
                ironlake_edp_panel_vdd_on(intel_dp);
                edid = drm_get_edid(connector, &intel_dp->adapter);
                if (edid) {
-                       drm_mode_connector_update_edid_property(connector,
-                                                               edid);
-                       intel_dp->edid_mode_count =
-                               drm_add_edid_modes(connector, edid);
-                       drm_edid_to_eld(connector, edid);
-                       intel_dp->edid = edid;
+                       if (drm_add_edid_modes(connector, edid)) {
+                               drm_mode_connector_update_edid_property(connector, edid);
+                               drm_edid_to_eld(connector, edid);
+                       } else {
+                               kfree(edid);
+                               edid = ERR_PTR(-EINVAL);
+                       }
+               } else {
+                       edid = ERR_PTR(-ENOENT);
+               }
+               intel_connector->edid = edid;
+
+               /* prefer fixed mode from EDID if available */
+               list_for_each_entry(scan, &connector->probed_modes, head) {
+                       if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
+                               fixed_mode = drm_mode_duplicate(dev, scan);
+                               break;
+                       }
                }
+
+               /* fallback to VBT if available for eDP */
+               if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
+                       fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+                       if (fixed_mode)
+                               fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+               }
+
                ironlake_edp_panel_vdd_off(intel_dp, false);
        }
 
-       intel_encoder->hot_plug = intel_dp_hot_plug;
-
        if (is_edp(intel_dp)) {
-               dev_priv->int_edp_connector = connector;
-               intel_panel_setup_backlight(dev);
+               intel_panel_init(&intel_connector->panel, fixed_mode);
+               intel_panel_setup_backlight(connector);
        }
 
        intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
 }
+
+void
+intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+{
+       struct intel_digital_port *intel_dig_port;
+       struct intel_encoder *intel_encoder;
+       struct drm_encoder *encoder;
+       struct intel_connector *intel_connector;
+
+       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       if (!intel_dig_port)
+               return;
+
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_dig_port);
+               return;
+       }
+
+       intel_encoder = &intel_dig_port->base;
+       encoder = &intel_encoder->base;
+
+       drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+
+       intel_encoder->enable = intel_enable_dp;
+       intel_encoder->pre_enable = intel_pre_enable_dp;
+       intel_encoder->disable = intel_disable_dp;
+       intel_encoder->post_disable = intel_post_disable_dp;
+       intel_encoder->get_hw_state = intel_dp_get_hw_state;
+
+       intel_dig_port->port = port;
+       intel_dig_port->dp.output_reg = output_reg;
+
+       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+       intel_encoder->cloneable = false;
+       intel_encoder->hot_plug = intel_dp_hot_plug;
+
+       intel_dp_init_connector(intel_dig_port, intel_connector);
+}
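The new intel_dp_init() above only allocates and wires up the shared intel_digital_port, then delegates connector setup to intel_dp_init_connector(). The key idea is that one allocation now embeds the encoder plus both the DP and HDMI state, and container_of() recovers the outer struct from any member. A reduced sketch of that layout, with illustrative types rather than the driver's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encoder    { int id; };
    struct dp_state   { unsigned output_reg; };
    struct hdmi_state { unsigned sdvox_reg; };

    /* one allocation carries the encoder and both output flavours */
    struct digital_port {
            struct encoder base;
            struct dp_state dp;
            struct hdmi_state hdmi;
    };

    static struct digital_port *dp_to_dig_port(struct dp_state *dp)
    {
            return container_of(dp, struct digital_port, dp);
    }

    int main(void)
    {
            struct digital_port port = { .base.id = 7 };

            /* from the DP member back to the shared encoder */
            printf("encoder id = %d\n", dp_to_dig_port(&port.dp)->base.id);
            return 0;
    }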
index fe71425..8a1bd4a 100644
@@ -94,6 +94,7 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
        int crtc_mask;
 };
 
+struct intel_panel {
+       struct drm_display_mode *fixed_mode;
+       int fitting_mode;
+};
+
 struct intel_connector {
        struct drm_connector base;
        /*
@@ -179,12 +185,19 @@ struct intel_connector {
        /* Reads out the current hw, returning true if the connector is enabled
         * and active (i.e. dpms ON state). */
        bool (*get_hw_state)(struct intel_connector *);
+
+       /* Panel info for eDP and LVDS */
+       struct intel_panel panel;
+
+       /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+       struct edid *edid;
 };
 
 struct intel_crtc {
        struct drm_crtc base;
        enum pipe pipe;
        enum plane plane;
+       enum transcoder cpu_transcoder;
        u8 lut_r[256], lut_g[256], lut_b[256];
        /*
         * Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
        struct intel_unpin_work *unpin_work;
        int fdi_lanes;
 
+       atomic_t unpin_work_count;
+
        /* Display surface base address adjustment for pageflips. Note that on
         * gen4+ this only adjusts up to a tile, offsets within a tile are
         * handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
 
        /* We can share PLLs across outputs if the timings match */
        struct intel_pch_pll *pch_pll;
+       uint32_t ddi_pll_sel;
 };
 
 struct intel_plane {
        struct drm_plane base;
        enum pipe pipe;
        struct drm_i915_gem_object *obj;
+       bool can_scale;
        int max_downscale;
        u32 lut_r[1024], lut_g[1024], lut_b[1024];
        void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
 } __attribute__((packed));
 
 struct intel_hdmi {
-       struct intel_encoder base;
        u32 sdvox_reg;
        int ddc_bus;
-       int ddi_port;
        uint32_t color_range;
        bool has_hdmi_sink;
        bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
                               struct drm_display_mode *adjusted_mode);
 };
 
-#define DP_RECEIVER_CAP_SIZE           0xf
 #define DP_MAX_DOWNSTREAM_PORTS                0x10
 #define DP_LINK_CONFIGURATION_SIZE     9
 
 struct intel_dp {
-       struct intel_encoder base;
        uint32_t output_reg;
        uint32_t DP;
        uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
        bool has_audio;
        enum hdmi_force_audio force_audio;
-       enum port port;
        uint32_t color_range;
        uint8_t link_bw;
        uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
        int panel_power_cycle_delay;
        int backlight_on_delay;
        int backlight_off_delay;
-       struct drm_display_mode *panel_fixed_mode;  /* for eDP */
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
-       struct edid *edid; /* cached EDID for eDP */
-       int edid_mode_count;
+       struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+       struct intel_encoder base;
+       enum port port;
+       struct intel_dp dp;
+       struct intel_hdmi hdmi;
 };
 
 static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 
 struct intel_unpin_work {
        struct work_struct work;
-       struct drm_device *dev;
+       struct drm_crtc *crtc;
        struct drm_i915_gem_object *old_fb_obj;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
-       int pending;
+       atomic_t pending;
+#define INTEL_FLIP_INACTIVE    0
+#define INTEL_FLIP_PENDING     1
+#define INTEL_FLIP_COMPLETE    2
        bool enable_stall_check;
 };
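Turning intel_unpin_work.pending into an atomic_t with explicit INACTIVE/PENDING/COMPLETE states lets the pageflip interrupt race safely against the CPU side: a compare-and-swap only completes a flip that was actually pending. A rough user-space model of that hand-off using C11 atomics (names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

    struct unpin_work { atomic_int pending; };

    /* interrupt side: only advance PENDING -> COMPLETE, never INACTIVE */
    static bool flip_complete(struct unpin_work *w)
    {
            int expected = FLIP_PENDING;
            return atomic_compare_exchange_strong(&w->pending, &expected,
                                                  FLIP_COMPLETE);
    }

    int main(void)
    {
            struct unpin_work w;

            atomic_init(&w.pending, FLIP_PENDING);
            printf("completed: %d\n", flip_complete(&w));  /* 1 */
            printf("completed: %d\n", flip_complete(&w));  /* 0: already done */
            return 0;
    }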
 
@@ -395,6 +415,8 @@ struct intel_fbc_work {
        int interval;
 };
 
+int intel_pch_rawclk(struct drm_device *dev);
+
 int intel_connector_update_modes(struct drm_connector *connector,
                                struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
                            int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+                                     struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
                            bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int output_reg,
                          enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+                                   struct intel_connector *intel_connector);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                 struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+                               const struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern int intel_edp_target_clock(struct intel_encoder *,
                                  struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
                                      enum plane plane);
 
 /* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+                           struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
                                   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
                                    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
                                         enum pipe pipe);
 extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
        return to_intel_connector(connector)->encoder;
 }
 
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+       struct intel_digital_port *intel_dig_port =
+               container_of(encoder, struct intel_digital_port, base.base);
+       return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+       return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+       return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
 extern void intel_connector_attach_encoder(struct intel_connector *connector,
                                           struct intel_encoder *encoder);
 extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                                    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+                            enum pipe pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 
 struct intel_load_detect_pipe {
        struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
                         struct drm_display_mode *mode);
 
+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+                                                     unsigned int bpp,
+                                                     unsigned int pitch);
+
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);
 
-extern void intel_enable_ddi(struct intel_encoder *encoder);
-extern void intel_disable_ddi(struct intel_encoder *encoder);
 extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
-                               struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+                                             enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
 #endif /* __INTEL_DRV_H__ */
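The cached intel_connector->edid above deliberately stores an ERR_PTR() instead of NULL when probing fails, so later users can distinguish "EDID read failed" from "never probed" without an extra flag. A user-space approximation of the <linux/err.h> helpers involved (simplified; the kernel versions carry more annotations):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

    int main(void)
    {
            void *edid = ERR_PTR(-ENOENT); /* "no EDID found" marker */

            if (IS_ERR_OR_NULL(edid))
                    printf("no usable EDID (err %ld)\n", PTR_ERR(edid));
            return 0;
    }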
index 9ba0aae..2ee9821 100644
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+       return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
 static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
-       struct drm_device *dev = intel_hdmi->base.base.dev;
+       struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t enabled_bits;
 
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 
 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
-       return container_of(encoder, struct intel_hdmi, base.base);
+       struct intel_digital_port *intel_dig_port =
+               container_of(encoder, struct intel_digital_port, base.base);
+       return &intel_dig_port->hdmi;
 }
 
 static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_hdmi, base);
+       return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }
 
 void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
 
+       avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
        intel_set_infoframe(encoder, &avi_if);
 }
 
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode)
 {
        return true;
 }
 
 static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
 {
-       struct drm_device *dev = intel_hdmi->base.base.dev;
+       struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit;
 
@@ -786,6 +794,9 @@ static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_digital_port *intel_dig_port =
+               hdmi_to_dig_port(intel_hdmi);
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct edid *edid;
        enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
                        intel_hdmi->has_audio =
                                (intel_hdmi->force_audio == HDMI_AUDIO_ON);
+               intel_encoder->type = INTEL_OUTPUT_HDMI;
        }
 
        return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
                        uint64_t val)
 {
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+       struct intel_digital_port *intel_dig_port =
+               hdmi_to_dig_port(intel_hdmi);
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
        return -EINVAL;
 
 done:
-       if (intel_hdmi->base.base.crtc) {
-               struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+       if (intel_dig_port->base.base.crtc) {
+               struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
                intel_set_mode(crtc, &crtc->mode,
                               crtc->x, crtc->y, crtc->fb);
        }
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-       .mode_fixup = intel_hdmi_mode_fixup,
-       .mode_set = intel_ddi_mode_set,
-       .disable = intel_encoder_noop,
-};
-
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
        .mode_fixup = intel_hdmi_mode_fixup,
        .mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
        intel_attach_broadcast_rgb_property(connector);
 }
 
-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+                              struct intel_connector *intel_connector)
 {
+       struct drm_connector *connector = &intel_connector->base;
+       struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_connector *connector;
-       struct intel_encoder *intel_encoder;
-       struct intel_connector *intel_connector;
-       struct intel_hdmi *intel_hdmi;
-
-       intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
-       if (!intel_hdmi)
-               return;
-
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-       if (!intel_connector) {
-               kfree(intel_hdmi);
-               return;
-       }
-
-       intel_encoder = &intel_hdmi->base;
-       drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS);
+       enum port port = intel_dig_port->port;
 
-       connector = &intel_connector->base;
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
        drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
-       intel_encoder->type = INTEL_OUTPUT_HDMI;
-
        connector->polled = DRM_CONNECTOR_POLL_HPD;
        connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
-       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
-       intel_encoder->cloneable = false;
-
-       intel_hdmi->ddi_port = port;
        switch (port) {
        case PORT_B:
                intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
                BUG();
        }
 
-       intel_hdmi->sdvox_reg = sdvox_reg;
-
        if (!HAS_PCH_SPLIT(dev)) {
                intel_hdmi->write_infoframe = g4x_write_infoframe;
                intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
                intel_hdmi->set_infoframes = cpt_set_infoframes;
        }
 
-       if (IS_HASWELL(dev)) {
-               intel_encoder->enable = intel_enable_ddi;
-               intel_encoder->disable = intel_disable_ddi;
-               intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-               drm_encoder_helper_add(&intel_encoder->base,
-                                      &intel_hdmi_helper_funcs_hsw);
-       } else {
-               intel_encoder->enable = intel_enable_hdmi;
-               intel_encoder->disable = intel_disable_hdmi;
-               intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
-               drm_encoder_helper_add(&intel_encoder->base,
-                                      &intel_hdmi_helper_funcs);
-       }
-       intel_connector->get_hw_state = intel_connector_get_hw_state;
-
+       if (IS_HASWELL(dev))
+               intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+       else
+               intel_connector->get_hw_state = intel_connector_get_hw_state;
 
        intel_hdmi_add_properties(intel_hdmi, connector);
 
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
 }
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+       struct intel_digital_port *intel_dig_port;
+       struct intel_encoder *intel_encoder;
+       struct drm_encoder *encoder;
+       struct intel_connector *intel_connector;
+
+       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       if (!intel_dig_port)
+               return;
+
+       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_dig_port);
+               return;
+       }
+
+       intel_encoder = &intel_dig_port->base;
+       encoder = &intel_encoder->base;
+
+       drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+                        DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+       intel_encoder->enable = intel_enable_hdmi;
+       intel_encoder->disable = intel_disable_hdmi;
+       intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+       intel_encoder->type = INTEL_OUTPUT_HDMI;
+       intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+       intel_encoder->cloneable = false;
+
+       intel_dig_port->port = port;
+       intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+       intel_dig_port->dp.output_reg = 0;
+
+       intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}
index c2c6dbc..3ef5af1 100644
@@ -432,7 +432,7 @@ timeout:
        I915_WRITE(GMBUS0 + reg_offset, 0);
 
        /* Hardware may not support GMBUS over these pins; try GPIO bit-banging instead. */
-       bus->force_bit = true;
+       bus->force_bit = 1;
        ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
 
 out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
 
                /* gmbus seems to be broken on i830 */
                if (IS_I830(dev))
-                       bus->force_bit = true;
+                       bus->force_bit = 1;
 
                intel_gpio_setup(bus, port);
 
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
        struct intel_gmbus *bus = to_intel_gmbus(adapter);
 
-       bus->force_bit = force_bit;
+       bus->force_bit += force_bit ? 1 : -1;
+       DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+                     force_bit ? "en" : "dis", adapter->name,
+                     bus->force_bit);
 }
 
 void intel_teardown_gmbus(struct drm_device *dev)
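With the change above, force_bit becomes a reference count rather than a flag: each caller that needs the GPIO bit-banging fallback increments it and later decrements it, so one consumer releasing the fallback cannot switch it off while another still depends on it. A toy version of that pattern (not the driver's API):

    #include <stdio.h>

    struct gmbus { int force_bit; };

    /* callers must pair each enable with a later disable */
    static void gmbus_force_bit(struct gmbus *bus, int enable)
    {
            bus->force_bit += enable ? 1 : -1;
    }

    static int gmbus_uses_bitbang(const struct gmbus *bus)
    {
            return bus->force_bit > 0; /* any holder keeps bit-banging on */
    }

    int main(void)
    {
            struct gmbus bus = { 0 };

            gmbus_force_bit(&bus, 1); /* consumer A */
            gmbus_force_bit(&bus, 1); /* consumer B */
            gmbus_force_bit(&bus, 0); /* A done; B still needs it */
            printf("bit-banging: %d\n", gmbus_uses_bitbang(&bus)); /* 1 */
            return 0;
    }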
index edba93b..b9a660a 100644
 #include <linux/acpi.h>
 
 /* Private structure for the integrated LVDS support */
-struct intel_lvds {
-       struct intel_encoder base;
+struct intel_lvds_connector {
+       struct intel_connector base;
 
-       struct edid *edid;
+       struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
+       struct intel_encoder base;
 
-       int fitting_mode;
        u32 pfit_control;
        u32 pfit_pgm_ratios;
        bool pfit_dirty;
 
-       struct drm_display_mode *fixed_mode;
+       struct intel_lvds_connector *attached_connector;
 };
 
-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
 {
-       return container_of(encoder, struct intel_lvds, base.base);
+       return container_of(encoder, struct intel_lvds_encoder, base.base);
 }
 
-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
 {
-       return container_of(intel_attached_encoder(connector),
-                           struct intel_lvds, base);
+       return container_of(connector, struct intel_lvds_connector, base.base);
 }
 
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 static void intel_enable_lvds(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 
        I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
 
-       if (intel_lvds->pfit_dirty) {
+       if (lvds_encoder->pfit_dirty) {
                /*
                 * Enable automatic panel scaling so that non-native modes
                 * fill the screen.  The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
                 * register description and PRM.
                 */
                DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-                             intel_lvds->pfit_control,
-                             intel_lvds->pfit_pgm_ratios);
+                             lvds_encoder->pfit_control,
+                             lvds_encoder->pfit_pgm_ratios);
 
-               I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-               I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-               intel_lvds->pfit_dirty = false;
+               I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+               I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+               lvds_encoder->pfit_dirty = false;
        }
 
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ctl_reg, lvds_reg, stat_reg;
 
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
                DRM_ERROR("timed out waiting for panel to power off\n");
 
-       if (intel_lvds->pfit_control) {
+       if (lvds_encoder->pfit_control) {
                I915_WRITE(PFIT_CONTROL, 0);
-               intel_lvds->pfit_dirty = true;
+               lvds_encoder->pfit_dirty = true;
        }
 
        I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
                                 struct drm_display_mode *mode)
 {
-       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-       struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 {
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-       struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+       struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+       struct intel_connector *intel_connector =
+               &lvds_encoder->attached_connector->base;
+       struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
        u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
        int pipe;
 
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
                return false;
        }
 
-       if (intel_encoder_check_is_cloned(&intel_lvds->base))
+       if (intel_encoder_check_is_cloned(&lvds_encoder->base))
                return false;
 
        /*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
         * with the panel scaling set up to source from the H/VDisplay
         * of the original mode.
         */
-       intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+       intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+                              adjusted_mode);
 
        if (HAS_PCH_SPLIT(dev)) {
-               intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+               intel_pch_panel_fitting(dev,
+                                       intel_connector->panel.fitting_mode,
                                        mode, adjusted_mode);
                return true;
        }
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 
        drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-       switch (intel_lvds->fitting_mode) {
+       switch (intel_connector->panel.fitting_mode) {
        case DRM_MODE_SCALE_CENTER:
                /*
                 * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
        if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
                pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
-       if (pfit_control != intel_lvds->pfit_control ||
-           pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-               intel_lvds->pfit_control = pfit_control;
-               intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-               intel_lvds->pfit_dirty = true;
+       if (pfit_control != lvds_encoder->pfit_control ||
+           pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+               lvds_encoder->pfit_control = pfit_control;
+               lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+               lvds_encoder->pfit_dirty = true;
        }
        dev_priv->lvds_border_bits = border;
 
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+       struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;
 
-       if (intel_lvds->edid)
-               return drm_add_edid_modes(connector, intel_lvds->edid);
+       /* use cached edid if we have one */
+       if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+               return drm_add_edid_modes(connector, lvds_connector->base.edid);
 
-       mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+       mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
        if (mode == NULL)
                return 0;
 
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
 static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
                            void *unused)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(nb, struct drm_i915_private, lid_notifier);
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_connector *connector = dev_priv->int_lvds_connector;
+       struct intel_lvds_connector *lvds_connector =
+               container_of(nb, struct intel_lvds_connector, lid_notifier);
+       struct drm_connector *connector = &lvds_connector->base.base;
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
                return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
         * check and update the status of the LVDS connector after receiving
         * the LID notification event.
         */
-       if (connector)
-               connector->status = connector->funcs->detect(connector,
-                                                            false);
+       connector->status = connector->funcs->detect(connector, false);
 
        /* Don't force modeset on machines where it causes a GPU lockup */
        if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
        dev_priv->modeset_on_lid = 0;
 
        mutex_lock(&dev->mode_config.mutex);
-       intel_modeset_check_state(dev);
+       intel_modeset_setup_hw_state(dev, true);
        mutex_unlock(&dev->mode_config.mutex);
 
        return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
  */
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_lvds_connector *lvds_connector =
+               to_lvds_connector(connector);
 
-       intel_panel_destroy_backlight(dev);
+       if (lvds_connector->lid_notifier.notifier_call)
+               acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+
+       if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+               kfree(lvds_connector->base.edid);
+
+       intel_panel_destroy_backlight(connector->dev);
+       intel_panel_fini(&lvds_connector->base.panel);
 
-       if (dev_priv->lid_notifier.notifier_call)
-               acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
                                   struct drm_property *property,
                                   uint64_t value)
 {
-       struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+       struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
 
        if (property == dev->mode_config.scaling_mode_property) {
-               struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+               struct drm_crtc *crtc;
 
                if (value == DRM_MODE_SCALE_NONE) {
                        DRM_DEBUG_KMS("no scaling not supported\n");
                        return -EINVAL;
                }
 
-               if (intel_lvds->fitting_mode == value) {
+               if (intel_connector->panel.fitting_mode == value) {
                        /* the LVDS scaling property is not changed */
                        return 0;
                }
-               intel_lvds->fitting_mode = value;
+               intel_connector->panel.fitting_mode = value;
+
+               crtc = intel_attached_encoder(connector)->base.crtc;
                if (crtc && crtc->enabled) {
                        /*
                         * If the CRTC is enabled, the display will be changed
@@ -912,12 +925,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
 bool intel_lvds_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_lvds *intel_lvds;
+       struct intel_lvds_encoder *lvds_encoder;
        struct intel_encoder *intel_encoder;
+       struct intel_lvds_connector *lvds_connector;
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_encoder *encoder;
        struct drm_display_mode *scan; /* *modes, *bios_mode; */
+       struct drm_display_mode *fixed_mode = NULL;
+       struct edid *edid;
        struct drm_crtc *crtc;
        u32 lvds;
        int pipe;
@@ -945,23 +961,25 @@ bool intel_lvds_init(struct drm_device *dev)
                }
        }
 
-       intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
-       if (!intel_lvds) {
+       lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+       if (!lvds_encoder)
                return false;
-       }
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-       if (!intel_connector) {
-               kfree(intel_lvds);
+       lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+       if (!lvds_connector) {
+               kfree(lvds_encoder);
                return false;
        }
 
+       lvds_encoder->attached_connector = lvds_connector;
+
        if (!HAS_PCH_SPLIT(dev)) {
-               intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+               lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
        }
 
-       intel_encoder = &intel_lvds->base;
+       intel_encoder = &lvds_encoder->base;
        encoder = &intel_encoder->base;
+       intel_connector = &lvds_connector->base;
        connector = &intel_connector->base;
        drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
                           DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1011,10 @@ bool intel_lvds_init(struct drm_device *dev)
 
        /* create the scaling mode property */
        drm_mode_create_scaling_mode_property(dev);
-       /*
-        * the initial panel fitting mode will be FULL_SCREEN.
-        */
-
-       drm_connector_attach_property(&intel_connector->base,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.scaling_mode_property,
                                      DRM_MODE_SCALE_ASPECT);
-       intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+       intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
        /*
         * LVDS discovery:
         * 1) check for EDID on DDC
@@ -1015,20 +1029,21 @@ bool intel_lvds_init(struct drm_device *dev)
         * Attempt to get the fixed panel mode from DDC.  Assume that the
         * preferred mode is the right one.
         */
-       intel_lvds->edid = drm_get_edid(connector,
-                                       intel_gmbus_get_adapter(dev_priv,
-                                                               pin));
-       if (intel_lvds->edid) {
-               if (drm_add_edid_modes(connector,
-                                      intel_lvds->edid)) {
+       edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+       if (edid) {
+               if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
-                                                               intel_lvds->edid);
+                                                               edid);
                } else {
-                       kfree(intel_lvds->edid);
-                       intel_lvds->edid = NULL;
+                       kfree(edid);
+                       edid = ERR_PTR(-EINVAL);
                }
+       } else {
+               edid = ERR_PTR(-ENOENT);
        }
-       if (!intel_lvds->edid) {
+       lvds_connector->base.edid = edid;
+
+       if (IS_ERR_OR_NULL(edid)) {
                /* Didn't get an EDID, so
                 * Set wide sync ranges so we get all modes
                 * handed to valid_mode for checking
@@ -1041,22 +1056,26 @@ bool intel_lvds_init(struct drm_device *dev)
 
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-                       intel_lvds->fixed_mode =
-                               drm_mode_duplicate(dev, scan);
-                       intel_find_lvds_downclock(dev,
-                                                 intel_lvds->fixed_mode,
-                                                 connector);
-                       goto out;
+                       DRM_DEBUG_KMS("using preferred mode from EDID: ");
+                       drm_mode_debug_printmodeline(scan);
+
+                       fixed_mode = drm_mode_duplicate(dev, scan);
+                       if (fixed_mode) {
+                               intel_find_lvds_downclock(dev, fixed_mode,
+                                                         connector);
+                               goto out;
+                       }
                }
        }
 
        /* Failed to get EDID, what about VBT? */
        if (dev_priv->lfp_lvds_vbt_mode) {
-               intel_lvds->fixed_mode =
-                       drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-               if (intel_lvds->fixed_mode) {
-                       intel_lvds->fixed_mode->type |=
-                               DRM_MODE_TYPE_PREFERRED;
+               DRM_DEBUG_KMS("using mode from VBT: ");
+               drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+               fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+               if (fixed_mode) {
+                       fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                        goto out;
                }
        }
@@ -1076,16 +1095,17 @@ bool intel_lvds_init(struct drm_device *dev)
        crtc = intel_get_crtc_for_pipe(dev, pipe);
 
        if (crtc && (lvds & LVDS_PORT_EN)) {
-               intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-               if (intel_lvds->fixed_mode) {
-                       intel_lvds->fixed_mode->type |=
-                               DRM_MODE_TYPE_PREFERRED;
+               fixed_mode = intel_crtc_mode_get(dev, crtc);
+               if (fixed_mode) {
+                       DRM_DEBUG_KMS("using current (BIOS) mode: ");
+                       drm_mode_debug_printmodeline(fixed_mode);
+                       fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                        goto out;
                }
        }
 
        /* If we still don't have a mode after all that, give up. */
-       if (!intel_lvds->fixed_mode)
+       if (!fixed_mode)
                goto failed;
 
 out:
@@ -1100,16 +1120,15 @@ out:
                I915_WRITE(PP_CONTROL,
                           I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
        }
-       dev_priv->lid_notifier.notifier_call = intel_lid_notify;
-       if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+       lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+       if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
-               dev_priv->lid_notifier.notifier_call = NULL;
+               lvds_connector->lid_notifier.notifier_call = NULL;
        }
-       /* keep the LVDS connector */
-       dev_priv->int_lvds_connector = connector;
        drm_sysfs_connector_add(connector);
 
-       intel_panel_setup_backlight(dev);
+       intel_panel_init(&intel_connector->panel, fixed_mode);
+       intel_panel_setup_backlight(connector);
 
        return true;
 
@@ -1117,7 +1136,9 @@ failed:
        DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
        drm_connector_cleanup(connector);
        drm_encoder_cleanup(encoder);
-       kfree(intel_lvds);
-       kfree(intel_connector);
+       if (fixed_mode)
+               drm_mode_destroy(dev, fixed_mode);
+       kfree(lvds_encoder);
+       kfree(lvds_connector);
        return false;
 }
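The reworked intel_lvds_init() above resolves the panel's fixed mode through a strict fallback chain: the EDID's preferred mode, then the VBT mode, then whatever mode the BIOS left programmed on the pipe. Sketched as a plain decision function, with stand-in types:

    #include <stdio.h>

    struct display_mode { int hdisplay, vdisplay; };

    /* order matters: EDID beats VBT beats the live BIOS configuration */
    static struct display_mode *
    pick_fixed_mode(struct display_mode *edid_preferred,
                    struct display_mode *vbt_mode,
                    struct display_mode *bios_mode)
    {
            if (edid_preferred)
                    return edid_preferred;
            if (vbt_mode)
                    return vbt_mode;
            return bios_mode; /* may be NULL: init fails in that case */
    }

    int main(void)
    {
            struct display_mode vbt = { 1366, 768 };
            struct display_mode *m = pick_fixed_mode(NULL, &vbt, NULL);

            printf("%dx%d\n", m->hdisplay, m->vdisplay);
            return 0;
    }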
index cabd84b..b00f1c8 100644
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        drm_edid_to_eld(connector, edid);
-       kfree(edid);
 
        return ret;
 }
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
                        struct i2c_adapter *adapter)
 {
        struct edid *edid;
+       int ret;
 
        edid = drm_get_edid(connector, adapter);
        if (!edid)
                return 0;
 
-       return intel_connector_update_modes(connector, edid);
+       ret = intel_connector_update_modes(connector, edid);
+       kfree(edid);
+
+       return ret;
 }
 
 static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
 
                dev_priv->force_audio_property = prop;
        }
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
 
 static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
                dev_priv->broadcast_rgb_property = prop;
        }
 
-       drm_connector_attach_property(connector, prop, 0);
+       drm_object_attach_property(&connector->base, prop, 0);
 }
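The kfree(edid) move above is an ownership fix: intel_connector_update_modes() no longer frees the EDID it is handed, so connectors that cache the EDID (eDP, LVDS) can keep it alive, while the one-shot path intel_ddc_get_modes() frees its own copy. The convention in miniature, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct edid { int num_modes; };

    /* borrows the EDID: the caller still owns it afterwards */
    static int update_modes(const struct edid *edid)
    {
            return edid->num_modes; /* stand-in for drm_add_edid_modes() */
    }

    /* one-shot user: allocates, uses, and frees its own copy */
    static int ddc_get_modes(void)
    {
            struct edid *edid = malloc(sizeof(*edid));
            int ret;

            if (!edid)
                    return 0;
            edid->num_modes = 3;
            ret = update_modes(edid);
            free(edid);
            return ret;
    }

    int main(void)
    {
            printf("modes: %d\n", ddc_get_modes());
            return 0;
    }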
index 5530413..7741c22 100644
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
        u32 max;
 
+       DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+
        if (!(bclp & ASLE_BCLP_VALID))
                return ASLE_BACKLIGHT_FAILED;
 
index e2aacd3..bee8cb6 100644
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
        return 0;
 }
 
-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
 
        /* Restore the CTL value if it was lost, e.g. after a GPU reset */
 
        if (HAS_PCH_SPLIT(dev_priv->dev)) {
                val = I915_READ(BLC_PWM_PCH_CTL2);
-               if (dev_priv->saveBLC_PWM_CTL2 == 0) {
-                       dev_priv->saveBLC_PWM_CTL2 = val;
+               if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+                       dev_priv->regfile.saveBLC_PWM_CTL2 = val;
                } else if (val == 0) {
-                       I915_WRITE(BLC_PWM_PCH_CTL2,
-                                  dev_priv->saveBLC_PWM_CTL2);
-                       val = dev_priv->saveBLC_PWM_CTL2;
+                       val = dev_priv->regfile.saveBLC_PWM_CTL2;
+                       I915_WRITE(BLC_PWM_PCH_CTL2, val);
                }
        } else {
                val = I915_READ(BLC_PWM_CTL);
-               if (dev_priv->saveBLC_PWM_CTL == 0) {
-                       dev_priv->saveBLC_PWM_CTL = val;
-                       dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+               if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+                       dev_priv->regfile.saveBLC_PWM_CTL = val;
+                       if (INTEL_INFO(dev)->gen >= 4)
+                               dev_priv->regfile.saveBLC_PWM_CTL2 =
+                                       I915_READ(BLC_PWM_CTL2);
                } else if (val == 0) {
-                       I915_WRITE(BLC_PWM_CTL,
-                                  dev_priv->saveBLC_PWM_CTL);
-                       I915_WRITE(BLC_PWM_CTL2,
-                                  dev_priv->saveBLC_PWM_CTL2);
-                       val = dev_priv->saveBLC_PWM_CTL;
+                       val = dev_priv->regfile.saveBLC_PWM_CTL;
+                       I915_WRITE(BLC_PWM_CTL, val);
+                       if (INTEL_INFO(dev)->gen >= 4)
+                               I915_WRITE(BLC_PWM_CTL2,
+                                          dev_priv->regfile.saveBLC_PWM_CTL2);
                }
        }
 
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
 
 static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 max;
 
-       max = i915_read_blc_pwm_ctl(dev_priv);
+       max = i915_read_blc_pwm_ctl(dev);
 
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
-       if (INTEL_INFO(dev)->gen < 4) 
+       if (INTEL_INFO(dev)->gen < 4)
                level <<= 1;
        tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-#if 0
        struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
-       if (i915_panel_ignore_lid)
-               return i915_panel_ignore_lid > 0 ?
-                       connector_status_connected :
-                       connector_status_disconnected;
 
-       /* opregion lid state on HP 2540p is wrong at boot up,
-        * appears to be either the BIOS or Linux ACPI fault */
-#if 0
        /* Assume that the BIOS does not lie through the OpRegion... */
-       if (dev_priv->opregion.lid_state)
+       if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
                return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
                        connector_status_connected :
                        connector_status_disconnected;
-#endif
+       }
 
-       return connector_status_unknown;
+       switch (i915_panel_ignore_lid) {
+       case -2:
+               return connector_status_connected;
+       case -1:
+               return connector_status_disconnected;
+       default:
+               return connector_status_unknown;
+       }
 }
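The rewritten intel_panel_detect() above trusts the OpRegion lid state by default and reserves i915.panel_ignore_lid for explicit overrides: -2 forces connected, -1 forces disconnected, and anything else falls through to unknown when no lid state exists. As a bare decision table (parameter names invented for the sketch):

    #include <stdio.h>

    enum status { DISCONNECTED, CONNECTED, UNKNOWN };

    static enum status panel_detect(int ignore_lid, int have_lid_state,
                                    int lid_open)
    {
            if (!ignore_lid && have_lid_state)
                    return lid_open ? CONNECTED : DISCONNECTED;

            switch (ignore_lid) {
            case -2: return CONNECTED;    /* force "panel present" */
            case -1: return DISCONNECTED; /* force "no panel" */
            default: return UNKNOWN;
            }
    }

    int main(void)
    {
            printf("%d\n", panel_detect(0, 1, 1)); /* CONNECTED */
            return 0;
    }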
 
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
        .get_brightness = intel_panel_get_brightness,
 };
 
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
+       struct drm_device *dev = connector->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct backlight_properties props;
-       struct drm_connector *connector;
 
        intel_panel_init_backlight(dev);
 
-       if (dev_priv->int_lvds_connector)
-               connector = dev_priv->int_lvds_connector;
-       else if (dev_priv->int_edp_connector)
-               connector = dev_priv->int_edp_connector;
-       else
-               return -ENODEV;
-
        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
                backlight_device_unregister(dev_priv->backlight);
 }
 #else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
-       intel_panel_init_backlight(dev);
+       intel_panel_init_backlight(connector->dev);
        return 0;
 }
 
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
        return;
 }
 #endif
+
+int intel_panel_init(struct intel_panel *panel,
+                    struct drm_display_mode *fixed_mode)
+{
+       panel->fixed_mode = fixed_mode;
+
+       return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+       struct intel_connector *intel_connector =
+               container_of(panel, struct intel_connector, panel);
+
+       if (panel->fixed_mode)
+               drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
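intel_panel_init()/intel_panel_fini() above give the fixed panel mode a clear owner: the panel takes the mode at init time and drm_mode_destroy()s it at teardown, which is what lets the LVDS failure path free an orphaned mode itself. The ownership contract, reduced to a sketch:

    #include <stdlib.h>

    struct display_mode { int hdisplay, vdisplay; };
    struct panel { struct display_mode *fixed_mode; };

    static int panel_init(struct panel *panel, struct display_mode *fixed_mode)
    {
            panel->fixed_mode = fixed_mode; /* panel now owns the mode */
            return 0;
    }

    static void panel_fini(struct panel *panel)
    {
            free(panel->fixed_mode); /* stand-in for drm_mode_destroy() */
    }

    int main(void)
    {
            struct panel panel;
            struct display_mode *mode = calloc(1, sizeof(*mode));

            if (!mode)
                    return 1;
            panel_init(&panel, mode);
            panel_fini(&panel); /* no leak: fini releases the mode */
            return 0;
    }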
index 442968f..496caa7 100644
@@ -1325,10 +1325,11 @@ static void valleyview_update_wm(struct drm_device *dev)
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
-                  (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+                  (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
-                  (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
+                  (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
+                  (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
 static void g4x_update_wm(struct drm_device *dev)
@@ -1374,11 +1375,11 @@ static void g4x_update_wm(struct drm_device *dev)
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
-                  (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+                  (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
-                  (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+                  (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
@@ -1468,9 +1469,12 @@ static void i9xx_update_wm(struct drm_device *dev)
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (crtc->enabled && crtc->fb) {
+               int cpp = crtc->fb->bits_per_pixel / 8;
+               if (IS_GEN2(dev))
+                       cpp = 4;
+
                planea_wm = intel_calculate_wm(crtc->mode.clock,
-                                              wm_info, fifo_size,
-                                              crtc->fb->bits_per_pixel / 8,
+                                              wm_info, fifo_size, cpp,
                                               latency_ns);
                enabled = crtc;
        } else
@@ -1479,9 +1483,12 @@ static void i9xx_update_wm(struct drm_device *dev)
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (crtc->enabled && crtc->fb) {
+               int cpp = crtc->fb->bits_per_pixel / 8;
+               if (IS_GEN2(dev))
+                       cpp = 4;
+
                planeb_wm = intel_calculate_wm(crtc->mode.clock,
-                                              wm_info, fifo_size,
-                                              crtc->fb->bits_per_pixel / 8,
+                                              wm_info, fifo_size, cpp,
                                               latency_ns);
                if (enabled == NULL)
                        enabled = crtc;
@@ -1571,8 +1578,7 @@ static void i830_update_wm(struct drm_device *dev)
 
        planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
-                                      crtc->fb->bits_per_pixel / 8,
-                                      latency_ns);
+                                      4, latency_ns);
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
        fwater_lo |= (3<<8) | planea_wm;
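
All of the cpp changes above feed the same helper: gen2 hardware meters
its FIFO in fixed 4-byte units regardless of framebuffer depth, hence
the clamp, and i830 now passes 4 unconditionally. A sketch approximating
intel_calculate_wm(), with illustrative parameters:

/* Estimate how many cacheline-sized FIFO entries drain during the
 * memory latency window; whatever remains of the FIFO (minus a guard)
 * becomes the watermark value. */
static long calc_wm(unsigned long clock_khz, int fifo_size, int cpp,
                    unsigned long latency_ns, int cacheline, int guard)
{
        long entries = ((clock_khz / 1000) * cpp * latency_ns) / 1000000;

        entries = (entries + cacheline - 1) / cacheline;  /* DIV_ROUND_UP */
        return fifo_size - (entries + guard);
}
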
 
@@ -2323,7 +2329,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 limits = gen6_rps_limits(dev_priv, &val);
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_delay);
        WARN_ON(val < dev_priv->rps.min_delay);
 
@@ -2398,12 +2404,12 @@ static void gen6_enable_rps(struct drm_device *dev)
        struct intel_ring_buffer *ring;
        u32 rp_state_cap;
        u32 gt_perf_status;
-       u32 pcu_mbox, rc6_mask = 0;
+       u32 rc6vids, pcu_mbox, rc6_mask = 0;
        u32 gtfifodbg;
        int rc6_mode;
-       int i;
+       int i, ret;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        /* Here begins a magic sequence of register writes to enable
         * auto-downclocking.
@@ -2497,30 +2503,16 @@ static void gen6_enable_rps(struct drm_device *dev)
                   GEN6_RP_UP_BUSY_AVG |
                   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500))
-               DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-
-       I915_WRITE(GEN6_PCODE_DATA, 0);
-       I915_WRITE(GEN6_PCODE_MAILBOX,
-                  GEN6_PCODE_READY |
-                  GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500))
-               DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-
-       /* Check for overclock support */
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500))
-               DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
-       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
-       pcu_mbox = I915_READ(GEN6_PCODE_DATA);
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500))
-               DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
-       if (pcu_mbox & (1<<31)) { /* OC supported */
-               dev_priv->rps.max_delay = pcu_mbox & 0xff;
-               DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+       ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
+       if (!ret) {
+               pcu_mbox = 0;
+               ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
+               if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
+                       dev_priv->rps.max_delay = pcu_mbox & 0xff;
+                       DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
+               }
+       } else {
+               DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
        }
 
        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2534,6 +2526,20 @@ static void gen6_enable_rps(struct drm_device *dev)
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
 
+       rc6vids = 0;
+       ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
+       if (IS_GEN6(dev) && ret) {
+               DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
+       } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+               DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
+                         GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
+               rc6vids &= 0xffff00;
+               rc6vids |= GEN6_ENCODE_RC6_VID(450);
+               ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
+               if (ret)
+                       DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
+       }
+
        gen6_gt_force_wake_put(dev_priv);
 }
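
The rc6vids exchange above guards against BIOSes that leave the RC6 VID
below 450 mV. GEN6_DECODE_RC6_VID and GEN6_ENCODE_RC6_VID appear to be
a linear code with a 245 mV base and 5 mV steps (per i915_reg.h; an
assumption, as the macros are not part of this diff):

/* Assumed linear VID code behind the BIOS workaround above. */
static inline int decode_rc6_vid(int code) { return code * 5 + 245; }
static inline int encode_rc6_vid(int mv)   { return (mv - 245) / 5; }

/* encode_rc6_vid(450) == 41, decode_rc6_vid(41) == 450: the floor the
 * driver writes back via GEN6_PCODE_WRITE_RC6VIDS. */
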
 
@@ -2541,10 +2547,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int min_freq = 15;
-       int gpu_freq, ia_freq, max_ia_freq;
+       int gpu_freq;
+       unsigned int ia_freq, max_ia_freq;
        int scaling_factor = 180;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        max_ia_freq = cpufreq_quick_get_max(0);
        /*
@@ -2575,17 +2582,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
                else
                        ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
                ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+               ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
 
-               I915_WRITE(GEN6_PCODE_DATA,
-                          (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
-                          gpu_freq);
-               I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
-                          GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
-               if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
-                             GEN6_PCODE_READY) == 0, 10)) {
-                       DRM_ERROR("pcode write of freq table timed out\n");
-                       continue;
-               }
+               sandybridge_pcode_write(dev_priv,
+                                       GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
+                                       ia_freq | gpu_freq);
        }
 }
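
Each loop iteration pairs a GPU frequency with an IA ratio and posts it
through the WRITE_MIN_FREQ_TABLE mailbox; with the shift applied up
front, the mailbox payload is simply ia_freq | gpu_freq. One table
entry restated below; min_freq and scaling_factor match the function
above, while the 800 MHz pin for low GPU frequencies sits above the
visible hunk and is taken on trust:

static unsigned int ia_ratio_for(int gpu_freq, int max_gpu,
                                 unsigned int max_ia_freq)
{
        const int min_freq = 15, scaling_factor = 180;
        unsigned int ia_freq;

        if (gpu_freq < min_freq)
                ia_freq = 800;  /* pin the IA side for very low GPU clocks */
        else
                ia_freq = max_ia_freq - ((max_gpu - gpu_freq) * scaling_factor) / 2;

        return (ia_freq + 50) / 100;    /* DIV_ROUND_CLOSEST(ia_freq, 100) */
}
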
 
@@ -2593,16 +2594,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->renderctx) {
-               i915_gem_object_unpin(dev_priv->renderctx);
-               drm_gem_object_unreference(&dev_priv->renderctx->base);
-               dev_priv->renderctx = NULL;
+       if (dev_priv->ips.renderctx) {
+               i915_gem_object_unpin(dev_priv->ips.renderctx);
+               drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
+               dev_priv->ips.renderctx = NULL;
        }
 
-       if (dev_priv->pwrctx) {
-               i915_gem_object_unpin(dev_priv->pwrctx);
-               drm_gem_object_unreference(&dev_priv->pwrctx->base);
-               dev_priv->pwrctx = NULL;
+       if (dev_priv->ips.pwrctx) {
+               i915_gem_object_unpin(dev_priv->ips.pwrctx);
+               drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
+               dev_priv->ips.pwrctx = NULL;
        }
 }
 
@@ -2628,14 +2629,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->renderctx == NULL)
-               dev_priv->renderctx = intel_alloc_context_page(dev);
-       if (!dev_priv->renderctx)
+       if (dev_priv->ips.renderctx == NULL)
+               dev_priv->ips.renderctx = intel_alloc_context_page(dev);
+       if (!dev_priv->ips.renderctx)
                return -ENOMEM;
 
-       if (dev_priv->pwrctx == NULL)
-               dev_priv->pwrctx = intel_alloc_context_page(dev);
-       if (!dev_priv->pwrctx) {
+       if (dev_priv->ips.pwrctx == NULL)
+               dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
+       if (!dev_priv->ips.pwrctx) {
                ironlake_teardown_rc6(dev);
                return -ENOMEM;
        }
@@ -2647,6 +2648,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       bool was_interruptible;
        int ret;
 
        /* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2663,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
        if (ret)
                return;
 
+       was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
+
        /*
         * GPU can automatically power down the render unit if given a page
         * to save state.
@@ -2668,12 +2673,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
        ret = intel_ring_begin(ring, 6);
        if (ret) {
                ironlake_teardown_rc6(dev);
+               dev_priv->mm.interruptible = was_interruptible;
                return;
        }
 
        intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
+       intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2694,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
         * does an implicit flush, combined with MI_FLUSH above, it should be
         * safe to assume that renderctx is valid
         */
-       ret = intel_wait_ring_idle(ring);
+       ret = intel_ring_idle(ring);
+       dev_priv->mm.interruptible = was_interruptible;
        if (ret) {
                DRM_ERROR("failed to enable ironlake power power savings\n");
                ironlake_teardown_rc6(dev);
                return;
        }
 
-       I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+       I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
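
The was_interruptible dance exists because the RC6 context submission
must run to completion: a signal arriving mid-sequence would otherwise
abort intel_ring_idle() with -ERESTARTSYS and leave the render context
half-initialised. The pattern in isolation (the bool stands in for
dev_priv->mm.interruptible):

static int run_uninterruptible(bool *interruptible, int (*op)(void *), void *arg)
{
        bool was = *interruptible;
        int ret;

        *interruptible = false; /* waits inside op() no longer take signals */
        ret = op(arg);
        *interruptible = was;   /* restored on success and error alike */
        return ret;
}
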
 
@@ -3304,37 +3311,72 @@ static void intel_init_emon(struct drm_device *dev)
 
 void intel_disable_gt_powersave(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        if (IS_IRONLAKE_M(dev)) {
                ironlake_disable_drps(dev);
                ironlake_disable_rc6(dev);
        } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+               cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+               mutex_lock(&dev_priv->rps.hw_lock);
                gen6_disable_rps(dev);
+               mutex_unlock(&dev_priv->rps.hw_lock);
        }
 }
 
+static void intel_gen6_powersave_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            rps.delayed_resume_work.work);
+       struct drm_device *dev = dev_priv->dev;
+
+       mutex_lock(&dev_priv->rps.hw_lock);
+       gen6_enable_rps(dev);
+       gen6_update_ring_freq(dev);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 void intel_enable_gt_powersave(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        if (IS_IRONLAKE_M(dev)) {
                ironlake_enable_drps(dev);
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
        } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
-               gen6_enable_rps(dev);
-               gen6_update_ring_freq(dev);
+               /*
+                * PCU communication is slow and this doesn't need to be
+                * done at any specific time, so do this out of our fast path
+                * to make resume and init faster.
+                */
+               schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
+                                     round_jiffies_up_relative(HZ));
        }
 }
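
Because PCU mailbox traffic is slow and nothing depends on its timing,
RPS bring-up now rides a delayed work item instead of blocking init and
resume. A self-contained sketch of the same workqueue idiom (names
invented for the example):

#include <linux/workqueue.h>
#include <linux/timer.h>

static void powersave_work(struct work_struct *work)
{
        /* take rps.hw_lock, enable RPS, program the ring frequency table */
}

static DECLARE_DELAYED_WORK(resume_work, powersave_work);

static void enable_powersave_deferred(void)
{
        /* round_jiffies_up_relative(HZ) batches this wakeup with other
         * timers due within the next second */
        schedule_delayed_work(&resume_work, round_jiffies_up_relative(HZ));
}
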
 
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
+        */
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        /* Required for FBC */
-       dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
-               DPFCRUNIT_CLOCK_GATE_DISABLE |
-               DPFDUNIT_CLOCK_GATE_DISABLE;
-       /* Required for CxSR */
-       dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
+       dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
+                  ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
+                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
        I915_WRITE(PCH_3DCGDIS0,
                   MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3384,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
        I915_WRITE(PCH_3DCGDIS1,
                   VFMUNIT_CLOCK_GATE_DISABLE);
 
-       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
        /*
         * According to the spec the following bits should be set in
         * order to enable memory self-refresh
@@ -3354,9 +3394,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   (I915_READ(ILK_DISPLAY_CHICKEN2) |
                    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
-       I915_WRITE(ILK_DSPCLK_GATE,
-                  (I915_READ(ILK_DSPCLK_GATE) |
-                   ILK_DPARB_CLK_GATE));
+       dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
        I915_WRITE(DISP_ARB_CTL,
                   (I915_READ(DISP_ARB_CTL) |
                    DISP_FBC_WM_DIS));
@@ -3378,28 +3416,56 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
                I915_WRITE(ILK_DISPLAY_CHICKEN2,
                           I915_READ(ILK_DISPLAY_CHICKEN2) |
                           ILK_DPARB_GATE);
-               I915_WRITE(ILK_DSPCLK_GATE,
-                          I915_READ(ILK_DSPCLK_GATE) |
-                          ILK_DPFC_DIS1 |
-                          ILK_DPFC_DIS2 |
-                          ILK_CLK_FBC);
        }
 
+       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
+
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
        I915_WRITE(_3D_CHICKEN2,
                   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
                   _3D_CHICKEN2_WM_READ_PIPELINED);
+
+       /* WaDisableRenderCachePipelinedFlush */
+       I915_WRITE(CACHE_MODE_0,
+                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
+
+       ibx_init_clock_gating(dev);
+}
+
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe;
+
+       /*
+        * On Ibex Peak and Cougar Point, we need to disable clock
+        * gating for the panel power sequencer or it will fail to
+        * start up when no ports are active.
+        */
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+                  DPLS_EDP_PPS_FIX_DIS);
+       /* The below fixes display corruption (a few pixels shifted
+        * downward) seen only on the LVDS of some HP Ivy Bridge laptops.
+        */
+       for_each_pipe(pipe)
+               I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+       /* WADP0ClockGatingDisable */
+       for_each_pipe(pipe) {
+               I915_WRITE(TRANS_CHICKEN1(pipe),
+                          TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
+       }
 }
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+       uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+       I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
@@ -3454,11 +3520,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
-       I915_WRITE(ILK_DSPCLK_GATE,
-                  I915_READ(ILK_DSPCLK_GATE) |
-                  ILK_DPARB_CLK_GATE  |
-                  ILK_DPFD_CLK_GATE);
+       I915_WRITE(ILK_DSPCLK_GATE_D,
+                  I915_READ(ILK_DSPCLK_GATE_D) |
+                  ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
+                  ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
+       /* WaMbcDriverBootEnable */
        I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
                   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3473,6 +3540,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
         * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
        I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
        I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
+
+       cpt_init_clock_gating(dev);
 }
 
 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3556,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN7_FF_THREAD_MODE, reg);
 }
 
+static void lpt_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * TODO: this bit should only be enabled when really needed, then
+        * disabled when not needed anymore in order to save power.
+        */
+       if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+               I915_WRITE(SOUTH_DSPCLK_GATE_D,
+                          I915_READ(SOUTH_DSPCLK_GATE_D) |
+                          PCH_LP_PARTITION_LEVEL_DISABLE);
+}
+
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3584,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
         */
        I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
-       I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
-
-       I915_WRITE(IVB_CHICKEN3,
-                  CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
-                  CHICKEN3_DGMG_DONE_FIX_DISABLE);
-
        /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
                   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3612,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(CACHE_MODE_1,
                   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
+       /* WaMbcDriverBootEnable */
+       I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+                  GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
        /* XXX: This is a workaround for early silicon revisions and should be
         * removed later.
         */
@@ -3547,27 +3625,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
                        WM_DBG_DISALLOW_SPRITE |
                        WM_DBG_DISALLOW_MAXFIFO);
 
+       lpt_init_clock_gating(dev);
 }
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
        uint32_t snpcr;
 
-       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
-
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
-       I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+       I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableEarlyCull */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
+       /* WaDisableBackToBackFlipFix */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+       /* WaDisablePSDDualDispatchEnable */
+       if (IS_IVB_GT1(dev))
+               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+       else
+               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
+                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
        /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
                   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3665,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_L3CNTLREG1,
                        GEN7_WA_FOR_GEN7_L3_CONTROL);
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-                       GEN7_WA_L3_CHICKEN_MODE);
+                  GEN7_WA_L3_CHICKEN_MODE);
+       if (IS_IVB_GT1(dev))
+               I915_WRITE(GEN7_ROW_CHICKEN2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+       else
+               I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
+                          _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
+       /* WaForceL3Serialization */
+       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
        /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
         * gating disable must be set.  Failure to set it results in
@@ -3607,6 +3707,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
                intel_flush_display_plane(dev_priv, pipe);
        }
 
+       /* WaMbcDriverBootEnable */
        I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
                   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3620,39 +3721,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= GEN6_MBC_SNPCR_MED;
        I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+
+       cpt_init_clock_gating(dev);
 }
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
-       uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
-
-       I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
        I915_WRITE(WM3_LP_ILK, 0);
        I915_WRITE(WM2_LP_ILK, 0);
        I915_WRITE(WM1_LP_ILK, 0);
 
-       I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+       I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableEarlyCull */
+       I915_WRITE(_3D_CHICKEN3,
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
+       /* WaDisableBackToBackFlipFix */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
+       I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
+                  _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
+
        /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
        I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
                   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
        /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
-       I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+       I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
+       /* WaForceL3Serialization */
+       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
+                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+       /* WaDisableDopClockGating */
+       I915_WRITE(GEN7_ROW_CHICKEN2,
+                  _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+
        /* This is required by WaCatErrorRejectionIssue */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
+       /* WaMbcDriverBootEnable */
        I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
                   GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
@@ -3704,6 +3825,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
                   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
                   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
                   PLANEA_FLIPDONE_INT_EN);
+
+       /*
+        * WaDisableVLVClockGating_VBIIssue
+        * Disable clock gating on the GCFG unit to prevent a delay
+        * in the reporting of vblank events.
+        */
+       I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3850,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
        if (IS_GM45(dev))
                dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
        I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
+
+       /* WaDisableRenderCachePipelinedFlush */
+       I915_WRITE(CACHE_MODE_0,
+                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 }
 
 static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +3909,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 }
 
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int pipe;
-
-       /*
-        * On Ibex Peak and Cougar Point, we need to disable clock
-        * gating for the panel power sequencer or it will fail to
-        * start up when no ports are active.
-        */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-       I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
-                  DPLS_EDP_PPS_FIX_DIS);
-       /* Without this, mode sets may fail silently on FDI */
-       for_each_pipe(pipe)
-               I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
 void intel_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        dev_priv->display.init_clock_gating(dev);
-
-       if (dev_priv->display.init_pch_clock_gating)
-               dev_priv->display.init_pch_clock_gating(dev);
 }
 
 /* Starting with Haswell, we have different power wells for
@@ -3840,7 +3939,7 @@ void intel_init_power_wells(struct drm_device *dev)
 
                if ((well & HSW_PWR_WELL_STATE) == 0) {
                        I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
-                       if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
+                       if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
                                DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
                }
        }
@@ -3878,11 +3977,6 @@ void intel_init_pm(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
-               if (HAS_PCH_IBX(dev))
-                       dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
-               else if (HAS_PCH_CPT(dev))
-                       dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
                if (IS_GEN5(dev)) {
                        if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
                                dev_priv->display.update_wm = ironlake_update_wm;
@@ -3993,6 +4087,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
                DRM_ERROR("GT thread status wait timed out\n");
 }
 
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE, 0);
+       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        u32 forcewake_ack;
@@ -4006,7 +4106,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-       I915_WRITE_NOTRACE(FORCEWAKE, 1);
+       I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
        POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4116,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
        __gen6_gt_wait_for_thread_c0(dev_priv);
 }
 
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+       POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
+}
+
 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
 {
        u32 forcewake_ack;
@@ -4029,7 +4135,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
        POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
        if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4073,7 +4179,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
 }
@@ -4111,13 +4217,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
+}
+
 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 {
        if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
                            FORCEWAKE_ACK_TIMEOUT_MS))
                DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
-       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
+       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
        if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
                            FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4239,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
 
 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
+       I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
        /* The below doubles as a POSTING_READ */
        gen6_gt_check_fifodbg(dev_priv);
 }
 
+void intel_gt_reset(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_VALLEYVIEW(dev)) {
+               vlv_force_wake_reset(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               __gen6_gt_force_wake_reset(dev_priv);
+               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+                       __gen6_gt_force_wake_mt_reset(dev_priv);
+       }
+}
+
 void intel_gt_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        spin_lock_init(&dev_priv->gt_lock);
 
+       intel_gt_reset(dev);
+
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->gt.force_wake_get = vlv_force_wake_get;
                dev_priv->gt.force_wake_put = vlv_force_wake_put;
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+               dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
+               dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+       } else if (IS_GEN6(dev)) {
                dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
                dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+       }
+       INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+                         intel_gen6_powersave_work);
+}
 
-               /* IVB configs may use multi-threaded forcewake */
-               if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-                       u32 ecobus;
-
-                       /* A small trick here - if the bios hasn't configured
-                        * MT forcewake, and if the device is in RC6, then
-                        * force_wake_mt_get will not wake the device and the
-                        * ECOBUS read will return zero. Which will be
-                        * (correctly) interpreted by the test below as MT
-                        * forcewake being disabled.
-                        */
-                       mutex_lock(&dev->struct_mutex);
-                       __gen6_gt_force_wake_mt_get(dev_priv);
-                       ecobus = I915_READ_NOTRACE(ECOBUS);
-                       __gen6_gt_force_wake_mt_put(dev_priv);
-                       mutex_unlock(&dev->struct_mutex);
-
-                       if (ecobus & FORCEWAKE_MT_ENABLE) {
-                               DRM_DEBUG_KMS("Using MT version of forcewake\n");
-                               dev_priv->gt.force_wake_get =
-                                       __gen6_gt_force_wake_mt_get;
-                               dev_priv->gt.force_wake_put =
-                                       __gen6_gt_force_wake_mt_put;
-                       }
-               }
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
+{
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+               DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
+               return -EAGAIN;
+       }
+
+       I915_WRITE(GEN6_PCODE_DATA, *val);
+       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+                    500)) {
+               DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
+               return -ETIMEDOUT;
        }
+
+       *val = I915_READ(GEN6_PCODE_DATA);
+       I915_WRITE(GEN6_PCODE_DATA, 0);
+
+       return 0;
 }
 
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
+{
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+               DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
+               return -EAGAIN;
+       }
+
+       I915_WRITE(GEN6_PCODE_DATA, val);
+       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+                    500)) {
+               DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
+               return -ETIMEDOUT;
+       }
+
+       I915_WRITE(GEN6_PCODE_DATA, 0);
+
+       return 0;
+}
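
Both helpers wrap the same handshake: check that GEN6_PCODE_MAILBOX is
idle, stage GEN6_PCODE_DATA, kick the mailbox with READY plus the
command, then poll for READY to clear. A usage sketch mirroring the
overclock probe in gen6_enable_rps (caller holds rps.hw_lock; zero
means success):

static int probe_overclock(struct drm_i915_private *dev_priv, u8 *max_delay)
{
        u32 mbox = 0;
        int ret;

        ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &mbox);
        if (ret)
                return ret;             /* -EAGAIN or -ETIMEDOUT from above */

        if (mbox & (1 << 31))           /* bit 31 flags overclock support */
                *max_delay = mbox & 0xff;
        return 0;
}
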
index ecbc5c5..2346b92 100644 (file)
@@ -45,7 +45,7 @@ struct pipe_control {
 
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
-       int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+       int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += ring->size;
        return space;
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
                /*
                 * TLB invalidate requires a post-sync write.
                 */
-               flags |= PIPE_CONTROL_QW_WRITE;
+               flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }
 
        ret = intel_ring_begin(ring, 4);
@@ -555,15 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 
 static void
 update_mboxes(struct intel_ring_buffer *ring,
-           u32 seqno,
-           u32 mmio_offset)
+             u32 mmio_offset)
 {
-       intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
-                             MI_SEMAPHORE_GLOBAL_GTT |
-                             MI_SEMAPHORE_REGISTER |
-                             MI_SEMAPHORE_UPDATE);
-       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
+       intel_ring_emit(ring, ring->outstanding_lazy_request);
 }
 
 /**
@@ -576,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring,
-                u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
 {
        u32 mbox1_reg;
        u32 mbox2_reg;
@@ -590,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
        mbox1_reg = ring->signal_mbox[0];
        mbox2_reg = ring->signal_mbox[1];
 
-       *seqno = i915_gem_next_request_seqno(ring);
-
-       update_mboxes(ring, *seqno, mbox1_reg);
-       update_mboxes(ring, *seqno, mbox2_reg);
+       update_mboxes(ring, mbox1_reg);
+       update_mboxes(ring, mbox2_reg);
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, *seqno);
+       intel_ring_emit(ring, ring->outstanding_lazy_request);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
@@ -653,10 +646,8 @@ do {                                                                       \
 } while (0)
 
 static int
-pc_render_add_request(struct intel_ring_buffer *ring,
-                     u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
 {
-       u32 seqno = i915_gem_next_request_seqno(ring);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;
@@ -677,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, ring->outstanding_lazy_request);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, ring->outstanding_lazy_request);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
@@ -888,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-                u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-       u32 seqno;
        int ret;
 
        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
 
-       seqno = i915_gem_next_request_seqno(ring);
-
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, seqno);
+       intel_ring_emit(ring, ring->outstanding_lazy_request);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
@@ -964,7 +949,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                        u32 offset, u32 length,
+                        unsigned flags)
 {
        int ret;
 
@@ -975,7 +962,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
-                       MI_BATCH_NON_SECURE_I965);
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
 
@@ -984,7 +971,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                               u32 offset, u32 len)
+                               u32 offset, u32 len,
+                               unsigned flags)
 {
        int ret;
 
@@ -993,7 +981,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER);
-       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
@@ -1003,7 +991,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                               u32 offset, u32 len)
+                        u32 offset, u32 len,
+                        unsigned flags)
 {
        int ret;
 
@@ -1012,7 +1001,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);
 
        return 0;
@@ -1075,6 +1064,29 @@ err:
        return ret;
 }
 
+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       u32 addr;
+
+       if (!dev_priv->status_page_dmah) {
+               dev_priv->status_page_dmah =
+                       drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+               if (!dev_priv->status_page_dmah)
+                       return -ENOMEM;
+       }
+
+       addr = dev_priv->status_page_dmah->busaddr;
+       if (INTEL_INFO(ring->dev)->gen >= 4)
+               addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+       I915_WRITE(HWS_PGA, addr);
+
+       ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+       memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+       return 0;
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
 {
@@ -1086,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        ring->size = 32 * PAGE_SIZE;
+       memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
 
        init_waitqueue_head(&ring->irq_queue);
 
@@ -1093,6 +1106,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                ret = init_status_page(ring);
                if (ret)
                        return ret;
+       } else {
+               BUG_ON(ring->id != RCS);
+               ret = init_phys_hws_pga(ring);
+               if (ret)
+                       return ret;
        }
 
        obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
-       ret = intel_wait_ring_idle(ring);
+       ret = intel_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);
@@ -1176,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
        cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-       uint32_t __iomem *virt;
-       int rem = ring->size - ring->tail;
-
-       if (ring->space < rem) {
-               int ret = intel_wait_ring_buffer(ring, rem);
-               if (ret)
-                       return ret;
-       }
-
-       virt = ring->virtual_start + ring->tail;
-       rem /= 4;
-       while (rem--)
-               iowrite32(MI_NOOP, virt++);
-
-       ring->tail = 0;
-       ring->space = ring_space(ring);
-
-       return 0;
-}
-
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
        int ret;
@@ -1231,7 +1227,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
                if (request->tail == -1)
                        continue;
 
-               space = request->tail - (ring->tail + 8);
+               space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
@@ -1266,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
        return 0;
 }
 
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1305,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
        return -EBUSY;
 }
 
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+       uint32_t __iomem *virt;
+       int rem = ring->size - ring->tail;
+
+       if (ring->space < rem) {
+               int ret = ring_wait_for_space(ring, rem);
+               if (ret)
+                       return ret;
+       }
+
+       virt = ring->virtual_start + ring->tail;
+       rem /= 4;
+       while (rem--)
+               iowrite32(MI_NOOP, virt++);
+
+       ring->tail = 0;
+       ring->space = ring_space(ring);
+
+       return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+       u32 seqno;
+       int ret;
+
+       /* We need to add any requests required to flush the objects and ring */
+       if (ring->outstanding_lazy_request) {
+               ret = i915_add_request(ring, NULL, NULL);
+               if (ret)
+                       return ret;
+       }
+
+       /* Wait upon the last request to be completed */
+       if (list_empty(&ring->request_list))
+               return 0;
+
+       seqno = list_entry(ring->request_list.prev,
+                          struct drm_i915_gem_request,
+                          list)->seqno;
+
+       return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+       if (ring->outstanding_lazy_request)
+               return 0;
+
+       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
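
intel_ring_begin() (next hunk) calls this before any dword is written,
so the seqno that add_request will later emit is reserved while failing
is still harmless. The lifecycle, reduced to its shape:

/* Outstanding-lazy-request (olr) flow implemented above:
 *   intel_ring_begin()  -> intel_ring_alloc_seqno(); may fail, ring untouched
 *   emit phase          -> commands reference ring->outstanding_lazy_request
 *   i915_add_request()  -> commits that seqno and clears the olr
 *   intel_ring_idle()   -> flushes any pending olr, then waits for the
 *                          last request on the list */
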
+
 int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
 {
@@ -1320,6 +1370,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
+       /* Preallocate the olr before touching the ring */
+       ret = intel_ring_alloc_seqno(ring);
+       if (ret)
+               return ret;
+
        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
@@ -1327,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
        }
 
        if (unlikely(ring->space < n)) {
-               ret = intel_wait_ring_buffer(ring, n);
+               ret = ring_wait_for_space(ring, n);
                if (unlikely(ret))
                        return ret;
        }
@@ -1391,10 +1446,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
                return ret;
 
        cmd = MI_FLUSH_DW;
+       /*
+        * Bspec vol 1c.5 - video engine command streamer:
+        * "If ENABLED, all TLBs will be invalidated once the flush
+        * operation is complete. This bit is only valid when the
+        * Post-Sync Operation field is a value of 1h or 3h."
+        */
        if (invalidate & I915_GEM_GPU_DOMAINS)
-               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+               cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
+                       MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
        intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
@@ -1402,8 +1464,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 }
 
 static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                             u32 offset, u32 len,
+                             unsigned flags)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+       /* bit0-7 is the length on GEN6+ */
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
+static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                             u32 offset, u32 len)
+                             u32 offset, u32 len,
+                             unsigned flags)
 {
        int ret;
 
@@ -1411,7 +1495,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START |
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
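
Every dispatch_execbuffer implementation now takes a flags word whose
only defined bit, I915_DISPATCH_SECURE, suppresses the non-secure bit
of MI_BATCH_BUFFER_START. The gen4+ selection, factored into one place:

static u32 batch_start_cmd(unsigned flags)
{
        /* Secure batches omit MI_BATCH_NON_SECURE_I965 and execute with
         * privileged command checking relaxed; everything else must
         * carry the non-secure bit. */
        return MI_BATCH_BUFFER_START |
               (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
}
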
@@ -1432,10 +1518,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
                return ret;
 
        cmd = MI_FLUSH_DW;
+       /*
+        * Bspec vol 1c.3 - blitter engine command streamer:
+        * "If ENABLED, all TLBs will be invalidated once the flush
+        * operation is complete. This bit is only valid when the
+        * Post-Sync Operation field is a value of 1h or 3h."
+        */
        if (invalidate & I915_GEM_DOMAIN_RENDER)
-               cmd |= MI_INVALIDATE_TLB;
+               cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
+                       MI_FLUSH_DW_OP_STOREDW;
        intel_ring_emit(ring, cmd);
-       intel_ring_emit(ring, 0);
+       intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
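
Both flush hunks encode the same BSpec rule: MI_FLUSH_DW's
TLB-invalidate bit is only honoured together with a post-sync write, so
the command now stores a dword into the per-ring HWS scratch slot
instead of targeting address 0. Spelled out (engine_bits is
MI_INVALIDATE_BSD on the video ring, 0 on the blitter):

static void emit_flush_invalidate(struct intel_ring_buffer *ring, u32 engine_bits)
{
        u32 cmd = MI_FLUSH_DW | MI_INVALIDATE_TLB | engine_bits |
                  MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        intel_ring_emit(ring, 0);       /* dword written by the post-sync op */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
}
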
@@ -1490,7 +1583,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
        ring->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (IS_HASWELL(dev))
+               ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+       else if (INTEL_INFO(dev)->gen >= 6)
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,12 +1596,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;
 
-
-       if (!I915_NEED_GFX_HWS(dev)) {
-               ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-               memset(ring->status_page.page_addr, 0, PAGE_SIZE);
-       }
-
        return intel_init_ring_buffer(dev, ring);
 }
 
@@ -1514,6 +1603,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+       int ret;
 
        ring->name = "render ring";
        ring->id = RCS;
@@ -1551,16 +1641,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;
 
-       if (!I915_NEED_GFX_HWS(dev))
-               ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
-
        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
 
        ring->size = size;
        ring->effective_size = ring->size;
-       if (IS_I830(ring->dev))
+       if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;
 
        ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1657,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
                return -ENOMEM;
        }
 
+       if (!I915_NEED_GFX_HWS(dev)) {
+               ret = init_phys_hws_pga(ring);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -1618,7 +1711,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        }
        ring->init = init_ring_common;
 
-
        return intel_init_ring_buffer(dev, ring);
 }
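
Note on the blt_ring_flush() hunk above: per the quoted Bspec, the TLB
invalidation bit is only honoured together with a post-sync operation,
which is why the patch adds MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW
and points the post-sync write at a scratch dword in the status page. A
minimal sketch of the resulting four-dword sequence, using only macros
that appear in these hunks:

	/* Sketch only -- mirrors what the patched blt_ring_flush() emits */
	static int blt_flush_invalidating_tlb(struct intel_ring_buffer *ring)
	{
		int ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		/* invalidate is only valid with a post-sync op of 1h/3h */
		intel_ring_emit(ring, MI_FLUSH_DW | MI_INVALIDATE_TLB |
				MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW);
		/* post-sync write target: scratch dword in the HWS page */
		intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(ring, 0);	/* value to store */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
		return 0;
	}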
 
index 2ea7a31..526182e 100644
@@ -1,6 +1,17 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
+/*
+ * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
+ * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
+ * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
+ *
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
+ * cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
+ */
+#define I915_RING_FREE_SPACE 64
+
 struct  intel_hw_status_page {
        u32             *page_addr;
        unsigned int    gfx_addr;
@@ -70,8 +81,7 @@ struct  intel_ring_buffer {
        int __must_check (*flush)(struct intel_ring_buffer *ring,
                                  u32   invalidate_domains,
                                  u32   flush_domains);
-       int             (*add_request)(struct intel_ring_buffer *ring,
-                                      u32 *seqno);
+       int             (*add_request)(struct intel_ring_buffer *ring);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,9 @@ struct  intel_ring_buffer {
        u32             (*get_seqno)(struct intel_ring_buffer *ring,
                                     bool lazy_coherency);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-                                              u32 offset, u32 length);
+                                              u32 offset, u32 length,
+                                              unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
        void            (*cleanup)(struct intel_ring_buffer *ring);
        int             (*sync_to)(struct intel_ring_buffer *ring,
                                   struct intel_ring_buffer *to,
@@ -181,27 +193,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
 #define I915_GEM_HWS_INDEX             0x20
+#define I915_GEM_HWS_SCRATCH_INDEX     0x30
+#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
-       return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
 {
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
 }
-
 void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
@@ -217,6 +223,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
        return ring->tail;
 }
 
+static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+       BUG_ON(ring->outstanding_lazy_request == 0);
+       return ring->outstanding_lazy_request;
+}
+
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
 {
        if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
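
The new I915_RING_FREE_SPACE constant encodes the quoted rule that the
head pointer must not be greater than the tail pointer when both land in
the same cacheline; reserving 64 bytes keeps them apart. A hypothetical
free-space helper, assuming head and tail are byte offsets into the ring
(the name and body are illustrative, not part of this patch):

	static int ring_space(struct intel_ring_buffer *ring)
	{
		/* always leave I915_RING_FREE_SPACE bytes unused so head
		 * and tail can never meet within one cacheline */
		int space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
		if (space < 0)
			space += ring->size;
		return space;
	}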
index a6ac0b4..c275bf0 100644
@@ -509,7 +509,7 @@ out:
 static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                                     void *response, int response_len)
 {
-       u8 retry = 5;
+       u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
        u8 status;
        int i;
 
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
         * command to be complete.
         *
         * Check 5 times in case the hardware failed to read the docs.
+        *
+        * Also beware that the first response by many devices is to
+        * reply PENDING and stall for time. TVs are notorious for
+        * requiring longer than specified to complete their replies.
+        * Originally (in the DDX long ago) the delay was only ever 15ms,
+        * with an additional 30ms delay for TVs added later after much
+        * experimentation. To accommodate both sets of delays, we do a
+        * sequence of slow checks if the device is falling behind and fails
+        * to reply within 5*15µs.
         */
        if (!intel_sdvo_read_byte(intel_sdvo,
                                  SDVO_I2C_CMD_STATUS,
                                  &status))
                goto log_fail;
 
-       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
-               udelay(15);
+       while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+               if (retry < 10)
+                       msleep(15);
+               else
+                       udelay(15);
+
                if (!intel_sdvo_read_byte(intel_sdvo,
                                          SDVO_I2C_CMD_STATUS,
                                          &status))
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 
        temp = I915_READ(intel_sdvo->sdvo_reg);
        if ((temp & SDVO_ENABLE) != 0) {
+               /* HW workaround for IBX: we need to move the port to
+                * transcoder A before disabling it. */
+               if (HAS_PCH_IBX(encoder->base.dev)) {
+                       struct drm_crtc *crtc = encoder->base.crtc;
+                       int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+                       if (temp & SDVO_PIPE_B_SELECT) {
+                               temp &= ~SDVO_PIPE_B_SELECT;
+                               I915_WRITE(intel_sdvo->sdvo_reg, temp);
+                               POSTING_READ(intel_sdvo->sdvo_reg);
+
+                               /* Again we need to write this twice. */
+                               I915_WRITE(intel_sdvo->sdvo_reg, temp);
+                               POSTING_READ(intel_sdvo->sdvo_reg);
+
+                               /* Transcoder selection bits only update
+                                * effectively on vblank. */
+                               if (crtc)
+                                       intel_wait_for_vblank(encoder->base.dev, pipe);
+                               else
+                                       msleep(50);
+                       }
+               }
+
                intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
        }
 }
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
        u8 status;
 
        temp = I915_READ(intel_sdvo->sdvo_reg);
-       if ((temp & SDVO_ENABLE) == 0)
+       if ((temp & SDVO_ENABLE) == 0) {
+               /* HW workaround for IBX: the disable path moved the
+                * port to transcoder A, so restore the select bit. */
+               if (HAS_PCH_IBX(dev)) {
+                       struct drm_crtc *crtc = encoder->base.crtc;
+                       int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+                       /* Restore the transcoder select bit. */
+                       if (pipe == PIPE_B)
+                               temp |= SDVO_PIPE_B_SELECT;
+               }
+
                intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
+       }
        for (i = 0; i < 2; i++)
                intel_wait_for_vblank(dev, intel_crtc->pipe);
 
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
 
-       if (!intel_sdvo_write_cmd(intel_sdvo,
-                                 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
-               return connector_status_unknown;
-
-       /* add 30ms delay when the output type might be TV */
-       if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
-               msleep(30);
-
-       if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
+       if (!intel_sdvo_get_value(intel_sdvo,
+                                 SDVO_CMD_GET_ATTACHED_DISPLAYS,
+                                 &response, 2))
                return connector_status_unknown;
 
        DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
        intel_sdvo_destroy_enhance_property(connector);
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
-       kfree(connector);
+       kfree(intel_sdvo_connector);
 }
 
 static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
        uint8_t cmd;
        int ret;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret)
                return ret;
 
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
        } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
                temp_value = val;
                if (intel_sdvo_connector->left == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         intel_sdvo_connector->right, val);
                        if (intel_sdvo_connector->left_margin == temp_value)
                                return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (intel_sdvo_connector->right == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         intel_sdvo_connector->left, val);
                        if (intel_sdvo_connector->right_margin == temp_value)
                                return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_H;
                        goto set_value;
                } else if (intel_sdvo_connector->top == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         intel_sdvo_connector->bottom, val);
                        if (intel_sdvo_connector->top_margin == temp_value)
                                return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
                        cmd = SDVO_CMD_SET_OVERSCAN_V;
                        goto set_value;
                } else if (intel_sdvo_connector->bottom == property) {
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         intel_sdvo_connector->top, val);
                        if (intel_sdvo_connector->bottom_margin == temp_value)
                                return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
        else
                mapping = &dev_priv->sdvo_mappings[1];
 
-       pin = GMBUS_PORT_DPB;
-       if (mapping->initialized)
+       if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
                pin = mapping->i2c_pin;
+       else
+               pin = GMBUS_PORT_DPB;
 
-       if (intel_gmbus_is_port_valid(pin)) {
-               sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
-               intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
-               intel_gmbus_force_bit(sdvo->i2c, true);
-       } else {
-               sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
-       }
+       sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
+
+       /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
+        * our code totally fails once we start using gmbus. Hence fall back to
+        * bit banging for now. */
+       intel_gmbus_force_bit(sdvo->i2c, true);
+}
+
+/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
+static void
+intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
+{
+       intel_gmbus_force_bit(sdvo->i2c, false);
 }
 
 static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                                i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
        intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
-       drm_connector_attach_property(&intel_sdvo_connector->base.base,
+       drm_object_attach_property(&intel_sdvo_connector->base.base.base,
                                      intel_sdvo_connector->tv_format, 0);
        return true;
 
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                intel_sdvo_connector->name = \
                        drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
                if (!intel_sdvo_connector->name) return false; \
-               drm_connector_attach_property(connector, \
+               drm_object_attach_property(&connector->base, \
                                              intel_sdvo_connector->name, \
                                              intel_sdvo_connector->cur_##name); \
                DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                if (!intel_sdvo_connector->left)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              intel_sdvo_connector->left,
                                              intel_sdvo_connector->left_margin);
 
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                if (!intel_sdvo_connector->right)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              intel_sdvo_connector->right,
                                              intel_sdvo_connector->right_margin);
                DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                if (!intel_sdvo_connector->top)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              intel_sdvo_connector->top,
                                              intel_sdvo_connector->top_margin);
 
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                if (!intel_sdvo_connector->bottom)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              intel_sdvo_connector->bottom,
                                              intel_sdvo_connector->bottom_margin);
                DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                if (!intel_sdvo_connector->dot_crawl)
                        return false;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              intel_sdvo_connector->dot_crawl,
                                              intel_sdvo_connector->cur_dot_crawl);
                DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
        intel_sdvo->is_sdvob = is_sdvob;
        intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
        intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
-       if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
-               kfree(intel_sdvo);
-               return false;
-       }
+       if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+               goto err_i2c_bus;
 
        /* encoder type will be decided later */
        intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
 err:
        drm_encoder_cleanup(&intel_encoder->base);
        i2c_del_adapter(&intel_sdvo->ddc);
+err_i2c_bus:
+       intel_sdvo_unselect_i2c_bus(intel_sdvo);
        kfree(intel_sdvo);
 
        return false;
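
The intel_sdvo_read_response() rework above turns the status poll into
two phases: five quick 15µs busy-waits for well-behaved devices, then up
to ten 15ms sleeps for devices (TVs especially) that stall in PENDING.
The pattern in isolation, with read_status() as a hypothetical stand-in
for the SDVO_I2C_CMD_STATUS read:

	/* Hedged sketch of the escalating two-phase poll */
	static bool wait_for_sdvo_response(struct intel_sdvo *intel_sdvo)
	{
		u8 retry = 15;	/* 5 quick checks + 10 long checks */
		u8 status;

		if (!read_status(intel_sdvo, &status))	/* hypothetical */
			return false;

		while (status == SDVO_CMD_STATUS_PENDING && --retry) {
			if (retry < 10)
				msleep(15);	/* slow phase: device lagging */
			else
				udelay(15);	/* fast phase: first checks */
			if (!read_status(intel_sdvo, &status))
				return false;
		}
		return status != SDVO_CMD_STATUS_PENDING;
	}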
index 82f5e5c..827dcd4 100644
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
        u32 sprctl, sprscale = 0;
-       int pixel_size;
+       unsigned long sprsurf_offset, linear_offset;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
        sprctl = I915_READ(SPRCTL(pipe));
 
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        switch (fb->pixel_format) {
        case DRM_FORMAT_XBGR8888:
                sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
-               pixel_size = 4;
                break;
        case DRM_FORMAT_XRGB8888:
                sprctl |= SPRITE_FORMAT_RGBX888;
-               pixel_size = 4;
                break;
        case DRM_FORMAT_YUYV:
                sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_YVYU:
                sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_UYVY:
                sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_VYUY:
                sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
-               pixel_size = 2;
                break;
        default:
-               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-               sprctl |= SPRITE_FORMAT_RGBX888;
-               pixel_size = 4;
-               break;
+               BUG();
        }
 
        if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 
        I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-       if (obj->tiling_mode != I915_TILING_NONE) {
+
+       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       sprsurf_offset =
+               intel_gen4_compute_offset_xtiled(&x, &y,
+                                                fb->bits_per_pixel / 8,
+                                                fb->pitches[0]);
+       linear_offset -= sprsurf_offset;
+
+       /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
+        * register */
+       if (IS_HASWELL(dev))
+               I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+       else if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
-       } else {
-               unsigned long offset;
+       else
+               I915_WRITE(SPRLINOFF(pipe), linear_offset);
 
-               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-               I915_WRITE(SPRLINOFF(pipe), offset);
-       }
        I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-       I915_WRITE(SPRSCALE(pipe), sprscale);
+       if (intel_plane->can_scale)
+               I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
-       I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
+       I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane)
 
        I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
        /* Can't leave the scaler enabled... */
-       I915_WRITE(SPRSCALE(pipe), 0);
+       if (intel_plane->can_scale)
+               I915_WRITE(SPRSCALE(pipe), 0);
        /* Activate double buffered register update */
        I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
        POSTING_READ(SPRSURF(pipe));
@@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       int pipe = intel_plane->pipe, pixel_size;
+       int pipe = intel_plane->pipe;
+       unsigned long dvssurf_offset, linear_offset;
        u32 dvscntr, dvsscale;
+       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
        dvscntr = I915_READ(DVSCNTR(pipe));
 
@@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        switch (fb->pixel_format) {
        case DRM_FORMAT_XBGR8888:
                dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
-               pixel_size = 4;
                break;
        case DRM_FORMAT_XRGB8888:
                dvscntr |= DVS_FORMAT_RGBX888;
-               pixel_size = 4;
                break;
        case DRM_FORMAT_YUYV:
                dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_YVYU:
                dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_UYVY:
                dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
-               pixel_size = 2;
                break;
        case DRM_FORMAT_VYUY:
                dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
-               pixel_size = 2;
                break;
        default:
-               DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
-               dvscntr |= DVS_FORMAT_RGBX888;
-               pixel_size = 4;
-               break;
+               BUG();
        }
 
        if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
 
        I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
        I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-       if (obj->tiling_mode != I915_TILING_NONE) {
+
+       linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+       dvssurf_offset =
+               intel_gen4_compute_offset_xtiled(&x, &y,
+                                                fb->bits_per_pixel / 8,
+                                                fb->pitches[0]);
+       linear_offset -= dvssurf_offset;
+
+       if (obj->tiling_mode != I915_TILING_NONE)
                I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
-       } else {
-               unsigned long offset;
+       else
+               I915_WRITE(DVSLINOFF(pipe), linear_offset);
 
-               offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
-               I915_WRITE(DVSLINOFF(pipe), offset);
-       }
        I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
-       I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
+       I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
@@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj, *old_obj;
        int pipe = intel_plane->pipe;
+       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
+                                                                     pipe);
        int ret = 0;
        int x = src_x >> 16, y = src_y >> 16;
        int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        src_h = src_h >> 16;
 
        /* Pipe must be running... */
-       if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+       if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
                return -EINVAL;
 
        if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (intel_plane->pipe != intel_crtc->pipe)
                return -EINVAL;
 
+       /* Sprite planes can be linear or x-tiled surfaces */
+       switch (obj->tiling_mode) {
+               case I915_TILING_NONE:
+               case I915_TILING_X:
+                       break;
+               default:
+                       return -EINVAL;
+       }
+
        /*
         * Clamp the width & height into the visible area.  Note we don't
         * try to scale the source if part of the visible region is offscreen.
@@ -473,6 +485,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                goto out;
 
        /*
+        * We may not have a scaler, e.g. HSW does not have one anymore
+        */
+       if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
+               return -EINVAL;
+
+       /*
         * We can take a larger source and scale it down, but
         * only so much...  16x is the max on SNB.
         */
@@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
        switch (INTEL_INFO(dev)->gen) {
        case 5:
        case 6:
+               intel_plane->can_scale = true;
                intel_plane->max_downscale = 16;
                intel_plane->update_plane = ilk_update_plane;
                intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
                break;
 
        case 7:
+               if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
+                       intel_plane->can_scale = false;
+               else
+                       intel_plane->can_scale = true;
                intel_plane->max_downscale = 2;
                intel_plane->update_plane = ivb_update_plane;
                intel_plane->disable_plane = ivb_disable_plane;
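
Both ivb_update_plane() and ilk_update_plane() now split the framebuffer
offset in two: a tile-aligned surface offset programmed via
I915_MODIFY_DISPBASE(), and a small residual left in the tile-offset or
linear-offset register. A plausible body for the
intel_gen4_compute_offset_xtiled() helper they call, assuming the
standard X-tile geometry of 512 bytes by 8 rows (only the call sites
appear in this patch; the implementation below is an assumption):

	static unsigned long
	compute_offset_xtiled(int *x, int *y, unsigned int cpp, unsigned int pitch)
	{
		int tile_rows = *y / 8;		/* whole rows of 4K X-tiles */
		int tiles = *x / (512 / cpp);	/* whole tiles within a row */

		*y %= 8;		/* residuals stay small enough for */
		*x %= 512 / cpp;	/* the TILEOFF registers           */
		return tile_rows * pitch * 8 + tiles * 4096;
	}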
index 62bb048..ea93520 100644
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                int dspcntr_reg = DSPCNTR(intel_crtc->plane);
                int pipeconf = I915_READ(pipeconf_reg);
                int dspcntr = I915_READ(dspcntr_reg);
-               int dspbase_reg = DSPADDR(intel_crtc->plane);
                int xpos = 0x0, ypos = 0x0;
                unsigned int xsize, ysize;
                /* Pipe must be off here */
                I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-               /* Flush the plane changes */
-               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+               intel_flush_display_plane(dev_priv, intel_crtc->plane);
 
                /* Wait for vblank for the disable to take effect */
                if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
                I915_WRITE(pipeconf_reg, pipeconf);
                I915_WRITE(dspcntr_reg, dspcntr);
-               /* Flush the plane changes */
-               I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
+               intel_flush_display_plane(dev_priv, intel_crtc->plane);
        }
 
        j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
        }
 
        intel_tv->tv_format = tv_mode->name;
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                connector->dev->mode_config.tv_mode_property, i);
 }
 
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
        int ret = 0;
        bool changed = false;
 
-       ret = drm_connector_property_set_value(connector, property, val);
+       ret = drm_object_property_set_value(&connector->base, property, val);
        if (ret < 0)
                goto out;
 
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
                                      ARRAY_SIZE(tv_modes),
                                      tv_format_names);
 
-       drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
+       drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
                                   initial_mode);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_left_margin_property,
                                   intel_tv->margin[TV_MARGIN_LEFT]);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_top_margin_property,
                                   intel_tv->margin[TV_MARGIN_TOP]);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_right_margin_property,
                                   intel_tv->margin[TV_MARGIN_RIGHT]);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_bottom_margin_property,
                                   intel_tv->margin[TV_MARGIN_BOTTOM]);
        drm_sysfs_connector_add(connector);
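
The property-helper substitutions in this file (and in the SDVO hunks
above) all follow the same drm core rename: the connector-specific
helpers became generic drm_mode_object helpers, so callers now pass the
connector's embedded base object:

	/* before */ drm_connector_attach_property(connector, prop, val);
	/* after  */ drm_object_attach_property(&connector->base, prop, val);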
index d6a1aae..70dd3c5 100644
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
 {
        void __iomem *mem;
        struct apertures_struct *aper = alloc_apertures(1);
+       if (!aper)
+               return -ENOMEM;
 
        /* BAR 0 is VRAM */
        mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
 
        aper->ranges[0].base = mdev->mc.vram_base;
        aper->ranges[0].size = mdev->mc.vram_window;
-       aper->count = 1;
 
        remove_conflicting_framebuffers(aper, "mgafb", true);
+       kfree(aper);
 
        if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
                                "mgadrmfb_vram")) {
index 1504699..8fc9d92 100644
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 
 static int mgag200_bo_move(struct ttm_buffer_object *bo,
                       bool evict, bool interruptible,
-                      bool no_wait_reserve, bool no_wait_gpu,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        return r;
 }
 
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
 
        ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
                          ttm_bo_type_device, &mgabo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+                         align >> PAGE_SHIFT, false, NULL, acc_size,
                          NULL, mgag200_bo_ttm_destroy);
        if (ret)
                return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
        mgag200_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
 
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret)
                return ret;
 
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
        for (i = 0; i < bo->placement.num_placement ; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
 
-       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+       ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
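
All of the mgag200 hunks above track a single TTM interface change: the
no_wait_reserve argument was removed, so ttm_bo_move_memcpy(),
ttm_bo_init() and ttm_bo_validate() each lose one parameter while the
surrounding logic is unchanged.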
index a990df4..ab25752 100644
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
 nouveau-y += core/core/engctx.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
+nouveau-y += core/core/falcon.o
 nouveau-y += core/core/gpuobj.o
 nouveau-y += core/core/handle.o
 nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
 nouveau-y += core/subdev/bios/bit.o
 nouveau-y += core/subdev/bios/conn.o
 nouveau-y += core/subdev/bios/dcb.o
+nouveau-y += core/subdev/bios/disp.o
 nouveau-y += core/subdev/bios/dp.o
 nouveau-y += core/subdev/bios/extdev.o
 nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
 nouveau-y += core/subdev/fb/base.o
 nouveau-y += core/subdev/fb/nv04.o
 nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv1a.o
 nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv25.o
 nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv35.o
+nouveau-y += core/subdev/fb/nv36.o
 nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv41.o
+nouveau-y += core/subdev/fb/nv44.o
+nouveau-y += core/subdev/fb/nv46.o
+nouveau-y += core/subdev/fb/nv47.o
+nouveau-y += core/subdev/fb/nv49.o
+nouveau-y += core/subdev/fb/nv4e.o
 nouveau-y += core/subdev/fb/nv50.o
 nouveau-y += core/subdev/fb/nvc0.o
 nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
 nouveau-y += core/engine/dmaobj/nv04.o
 nouveau-y += core/engine/dmaobj/nv50.o
 nouveau-y += core/engine/dmaobj/nvc0.o
+nouveau-y += core/engine/dmaobj/nvd0.o
 nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/bsp/nvc0.o
+nouveau-y += core/engine/bsp/nve0.o
 nouveau-y += core/engine/copy/nva3.o
 nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
 nouveau-y += core/engine/disp/nv04.o
 nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nv84.o
+nouveau-y += core/engine/disp/nv94.o
+nouveau-y += core/engine/disp/nva0.o
+nouveau-y += core/engine/disp/nva3.o
 nouveau-y += core/engine/disp/nvd0.o
+nouveau-y += core/engine/disp/nve0.o
+nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/hdanva3.o
+nouveau-y += core/engine/disp/hdanvd0.o
+nouveau-y += core/engine/disp/hdminv84.o
+nouveau-y += core/engine/disp/hdminva3.o
+nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/sornv50.o
+nouveau-y += core/engine/disp/sornv94.o
+nouveau-y += core/engine/disp/sornvd0.o
 nouveau-y += core/engine/disp/vga.o
 nouveau-y += core/engine/fifo/base.o
 nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
 nouveau-y += core/engine/mpeg/nv50.o
 nouveau-y += core/engine/mpeg/nv84.o
 nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/ppp/nvc0.o
 nouveau-y += core/engine/software/nv04.o
 nouveau-y += core/engine/software/nv10.o
 nouveau-y += core/engine/software/nv50.o
 nouveau-y += core/engine/software/nvc0.o
 nouveau-y += core/engine/vp/nv84.o
+nouveau-y += core/engine/vp/nvc0.o
+nouveau-y += core/engine/vp/nve0.o
 
 # drm/core
 nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
 
 # drm/kms
 nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
-nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
+nouveau-y += nouveau_connector.o nouveau_dp.o
 nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
 
 # drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
 nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
 
 # drm/kms/nv50-
-nouveau-y += nv50_display.o nvd0_display.o
-nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
-nouveau-y += nv50_evo.o
+nouveau-y += nv50_display.o
 
 # drm/pm
 nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
index e41b10d..84c71fa 100644
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
        return nouveau_gpuobj_fini(&engctx->base, suspend);
 }
 
+int
+_nouveau_engctx_ctor(struct nouveau_object *parent,
+                    struct nouveau_object *engine,
+                    struct nouveau_oclass *oclass, void *data, u32 size,
+                    struct nouveau_object **pobject)
+{
+       struct nouveau_engctx *engctx;
+       int ret;
+
+       ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
+                                   NVOBJ_FLAG_ZERO_ALLOC, &engctx);
+       *pobject = nv_object(engctx);
+       return ret;
+}
+
 void
 _nouveau_engctx_dtor(struct nouveau_object *object)
 {
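
The generic _nouveau_engctx_ctor() added here lets engines with no
per-context state drop their hand-rolled constructors; the nv84 BSP
conversion below wires it, together with the existing generic
dtor/init/fini hooks, straight into the context oclass.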
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 0000000..6b0843c
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/falcon.h>
+
+#include <subdev/timer.h>
+
+u32
+_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+       return nv_rd32(falcon, falcon->addr + addr);
+}
+
+void
+_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+       nv_wr32(falcon, falcon->addr + addr, data);
+}
+
+int
+_nouveau_falcon_init(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nouveau_falcon *falcon = (void *)object;
+       const struct firmware *fw;
+       char name[32] = "internal";
+       int ret, i;
+       u32 caps;
+
+       /* enable engine, and determine its capabilities */
+       ret = nouveau_engine_init(&falcon->base);
+       if (ret)
+               return ret;
+
+       if (device->chipset <  0xa3 ||
+           device->chipset == 0xaa || device->chipset == 0xac) {
+               falcon->version = 0;
+               falcon->secret  = (falcon->addr == 0x087000) ? 1 : 0;
+       } else {
+               caps = nv_ro32(falcon, 0x12c);
+               falcon->version = (caps & 0x0000000f);
+               falcon->secret  = (caps & 0x00000030) >> 4;
+       }
+
+       caps = nv_ro32(falcon, 0x108);
+       falcon->code.limit = (caps & 0x000001ff) << 8;
+       falcon->data.limit = (caps & 0x0003fe00) >> 1;
+
+       nv_debug(falcon, "falcon version: %d\n", falcon->version);
+       nv_debug(falcon, "secret level: %d\n", falcon->secret);
+       nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
+       nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
+
+       /* wait for 'uc halted' to be signalled before continuing */
+       if (falcon->secret) {
+               nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
+               nv_wo32(falcon, 0x004, 0x00000010);
+       }
+
+       /* disable all interrupts */
+       nv_wo32(falcon, 0x014, 0xffffffff);
+
+       /* no default ucode provided by the engine implementation, try to
+        * locate a "self-bootstrapping" firmware image for the engine
+        */
+       if (!falcon->code.data) {
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret == 0) {
+                       falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+                       falcon->code.size = fw->size;
+                       falcon->data.data = NULL;
+                       falcon->data.size = 0;
+                       release_firmware(fw);
+               }
+
+               falcon->external = true;
+       }
+
+       /* next step is to try to load "static code/data segment" firmware
+        * images for the engine
+        */
+       if (!falcon->code.data) {
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret) {
+                       nv_error(falcon, "unable to load firmware data\n");
+                       return ret;
+               }
+
+               falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->data.size = fw->size;
+               release_firmware(fw);
+               if (!falcon->data.data)
+                       return -ENOMEM;
+
+               snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
+                        device->chipset, falcon->addr >> 12);
+
+               ret = request_firmware(&fw, name, &device->pdev->dev);
+               if (ret) {
+                       nv_error(falcon, "unable to load firmware code\n");
+                       return ret;
+               }
+
+               falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+               falcon->code.size = fw->size;
+               release_firmware(fw);
+               if (!falcon->code.data)
+                       return -ENOMEM;
+       }
+
+       nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
+                "static code/data segments" : "self-bootstrapping");
+
+       /* ensure any "self-bootstrapping" firmware image is in vram */
+       if (!falcon->data.data && !falcon->core) {
+               ret = nouveau_gpuobj_new(object->parent, NULL,
+                                        falcon->code.size, 256, 0,
+                                       &falcon->core);
+               if (ret) {
+                       nv_error(falcon, "core allocation failed, %d\n", ret);
+                       return ret;
+               }
+
+               for (i = 0; i < falcon->code.size; i += 4)
+                       nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
+       }
+
+       /* upload firmware bootloader (or the full code segments) */
+       if (falcon->core) {
+               if (device->card_type < NV_C0)
+                       nv_wo32(falcon, 0x618, 0x04000000);
+               else
+                       nv_wo32(falcon, 0x618, 0x00000114);
+               nv_wo32(falcon, 0x11c, 0);
+               nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
+               nv_wo32(falcon, 0x114, 0);
+               nv_wo32(falcon, 0x118, 0x00006610);
+       } else {
+               if (falcon->code.size > falcon->code.limit ||
+                   falcon->data.size > falcon->data.limit) {
+                       nv_error(falcon, "ucode exceeds falcon limit(s)\n");
+                       return -EINVAL;
+               }
+
+               if (falcon->version < 3) {
+                       nv_wo32(falcon, 0xff8, 0x00100000);
+                       for (i = 0; i < falcon->code.size / 4; i++)
+                               nv_wo32(falcon, 0xff4, falcon->code.data[i]);
+               } else {
+                       nv_wo32(falcon, 0x180, 0x01000000);
+                       for (i = 0; i < falcon->code.size / 4; i++) {
+                               if ((i & 0x3f) == 0)
+                                       nv_wo32(falcon, 0x188, i >> 6);
+                               nv_wo32(falcon, 0x184, falcon->code.data[i]);
+                       }
+               }
+       }
+
+       /* upload data segment (if necessary), zeroing the remainder */
+       if (falcon->version < 3) {
+               nv_wo32(falcon, 0xff8, 0x00000000);
+               for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+                       nv_wo32(falcon, 0xff4, falcon->data.data[i]);
+               for (; i < falcon->data.limit; i += 4)
+                       nv_wo32(falcon, 0xff4, 0x00000000);
+       } else {
+               nv_wo32(falcon, 0x1c0, 0x01000000);
+               for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
+                       nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
+               for (; i < falcon->data.limit / 4; i++)
+                       nv_wo32(falcon, 0x1c4, 0x00000000);
+       }
+
+       /* start it running */
+       nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
+       nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
+       nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
+       nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
+       return 0;
+}
+
+int
+_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nouveau_falcon *falcon = (void *)object;
+
+       if (!suspend) {
+               nouveau_gpuobj_ref(NULL, &falcon->core);
+               if (falcon->external) {
+                       kfree(falcon->data.data);
+                       kfree(falcon->code.data);
+                       falcon->code.data = NULL;
+               }
+       }
+
+       nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
+       nv_wo32(falcon, 0x014, 0xffffffff);
+
+       return nouveau_engine_fini(&falcon->base, suspend);
+}
+
+int
+nouveau_falcon_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, u32 addr, bool enable,
+                      const char *iname, const char *fname,
+                      int length, void **pobject)
+{
+       struct nouveau_falcon *falcon;
+       int ret;
+
+       ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
+                                    fname, length, pobject);
+       falcon = *pobject;
+       if (ret)
+               return ret;
+
+       falcon->addr = addr;
+       return 0;
+}
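
_nouveau_falcon_init() derives external firmware names from the chipset
and the falcon's MMIO base, as the snprintf() calls above show:
nouveau/nv%02x_fuc%03x for a self-bootstrapping image, with "d" and "c"
suffixes for split data and code segments. Illustrative values for a
falcon at 0x084000 on a 0xc0 chipset:

	nouveau/nvc0_fuc084	/* self-bootstrapping image, tried first */
	nouveau/nvc0_fuc084d	/* static data segment, fallback */
	nouveau/nvc0_fuc084c	/* static code segment, fallback */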
index 70586fd..560b221 100644
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
 }
 
 u32
-_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
        struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
 }
 
 void
-_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
        struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
index a6d3cd6..0261a11 100644
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 int
 nouveau_mm_fini(struct nouveau_mm *mm)
 {
-       struct nouveau_mm_node *node, *heap =
-               list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
-       int nodes = 0;
+       if (nouveau_mm_initialised(mm)) {
+               struct nouveau_mm_node *node, *heap =
+                       list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
+               int nodes = 0;
+
+               list_for_each_entry(node, &mm->nodes, nl_entry) {
+                       if (WARN_ON(nodes++ == mm->heap_nodes))
+                               return -EBUSY;
+               }
 
-       list_for_each_entry(node, &mm->nodes, nl_entry) {
-               if (WARN_ON(nodes++ == mm->heap_nodes))
-                       return -EBUSY;
+               kfree(heap);
        }
 
-       kfree(heap);
        return 0;
 }
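
nouveau_mm_fini() is now guarded by nouveau_mm_initialised(), so
teardown paths can call it unconditionally; the heap-node leak check and
the kfree(heap) only run for an mm that was actually set up.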
index 66f7dfd..1d9f614 100644
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/bsp.h>
 
 struct nv84_bsp_priv {
-       struct nouveau_bsp base;
-};
-
-struct nv84_bsp_chan {
-       struct nouveau_bsp_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
  * BSP context
  ******************************************************************************/
 
-static int
-nv84_bsp_context_ctor(struct nouveau_object *parent,
-                     struct nouveau_object *engine,
-                     struct nouveau_oclass *oclass, void *data, u32 size,
-                     struct nouveau_object **pobject)
-{
-       struct nv84_bsp_chan *priv;
-       int ret;
-
-       ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
-                                        0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv84_bsp_context_dtor(struct nouveau_object *object)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       nouveau_bsp_context_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_context_init(struct nouveau_object *object)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_bsp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_bsp_chan *priv = (void *)object;
-       return nouveau_bsp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_bsp_cclass = {
        .handle = NV_ENGCTX(BSP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_bsp_context_ctor,
-               .dtor = nv84_bsp_context_dtor,
-               .init = nv84_bsp_context_init,
-               .fini = nv84_bsp_context_fini,
-               .rd32 = _nouveau_bsp_context_rd32,
-               .wr32 = _nouveau_bsp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
  * BSP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_bsp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_bsp_priv *priv;
        int ret;
 
-       ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PBSP", "bsp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x04008000;
-       nv_subdev(priv)->intr = nv84_bsp_intr;
        nv_engine(priv)->cclass = &nv84_bsp_cclass;
        nv_engine(priv)->sclass = nv84_bsp_sclass;
        return 0;
 }
 
-static void
-nv84_bsp_dtor(struct nouveau_object *object)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       nouveau_bsp_destroy(&priv->base);
-}
-
-static int
-nv84_bsp_init(struct nouveau_object *object)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_bsp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_bsp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_bsp_priv *priv = (void *)object;
-       return nouveau_bsp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_bsp_oclass = {
        .handle = NV_ENGINE(BSP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_bsp_ctor,
-               .dtor = nv84_bsp_dtor,
-               .init = nv84_bsp_init,
-               .fini = nv84_bsp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 0000000..0a5aa6b
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nvc0_bsp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_sclass[] = {
+       { 0x90b1, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_bsp_cclass = {
+       .handle = NV_ENGCTX(BSP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_bsp_init(struct nouveau_object *object)
+{
+       struct nvc0_bsp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x084010, 0x0000fff2);
+       nv_wr32(priv, 0x08401c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nvc0_bsp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+                                   "PBSP", "bsp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00008000;
+       nv_engine(priv)->cclass = &nvc0_bsp_cclass;
+       nv_engine(priv)->sclass = nvc0_bsp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_bsp_oclass = {
+       .handle = NV_ENGINE(BSP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_bsp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_bsp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644 (file)
index 0000000..d4f23bb
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/bsp.h>
+
+struct nve0_bsp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_sclass[] = {
+       { 0x95b1, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PBSP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_bsp_cclass = {
+       .handle = NV_ENGCTX(BSP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PBSP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_bsp_init(struct nouveau_object *object)
+{
+       struct nve0_bsp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x084010, 0x0000fff2);
+       nv_wr32(priv, 0x08401c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nve0_bsp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
+                                   "PBSP", "bsp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00008000;
+       nv_engine(priv)->cclass = &nve0_bsp_cclass;
+       nv_engine(priv)->sclass = nve0_bsp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_bsp_oclass = {
+       .handle = NV_ENGINE(BSP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_bsp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nve0_bsp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
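
Both new files hand the unit's MMIO base (0x084000) to nouveau_falcon_create(), which is what lets the shared _nouveau_falcon_rd32/_nouveau_falcon_wr32 entry points service any instance. A self-checking sketch of that base-relative addressing; the struct layout here is an assumption for illustration, not the real falcon class:

#include <assert.h>
#include <stdint.h>

struct falcon {
        uint32_t addr;  /* MMIO base, 0x084000 for PBSP here */
};

/* Base-relative register address, as the shared accessors presumably
 * compute it (illustrative assumption). */
static uint32_t falcon_reg(const struct falcon *f, uint32_t off)
{
        return f->addr + off;
}

int main(void)
{
        struct falcon pbsp = { 0x084000 };
        assert(falcon_reg(&pbsp, 0x010) == 0x084010);   /* the init write above */
        return 0;
}
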
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0..283248c 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <subdev/fb.h>
 #include <subdev/vm.h>
 #include "fuc/nva3.fuc.h"
 
 struct nva3_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nva3_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nva3_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nva3_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
-                                         NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nva3_copy_cclass = {
        .handle = NV_ENGCTX(COPY0, 0xa3),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nva3_copy_context_ctor,
-               .dtor = _nouveau_copy_context_dtor,
-               .init = _nouveau_copy_context_init,
-               .fini = _nouveau_copy_context_fini,
-               .rd32 = _nouveau_copy_context_rd32,
-               .wr32 = _nouveau_copy_context_wr32,
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
 
        },
 };
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
        {}
 };
 
-static void
+void
 nva3_copy_intr(struct nouveau_subdev *subdev)
 {
        struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
        struct nouveau_engine *engine = nv_engine(subdev);
+       struct nouveau_falcon *falcon = (void *)subdev;
        struct nouveau_object *engctx;
-       struct nva3_copy_priv *priv = (void *)subdev;
-       u32 dispatch = nv_rd32(priv, 0x10401c);
-       u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
-       u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
-       u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
-       u32 addr = nv_rd32(priv, 0x104040) >> 16;
+       u32 dispatch = nv_ro32(falcon, 0x01c);
+       u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+       u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
+       u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
+       u32 addr = nv_ro32(falcon, 0x040) >> 16;
        u32 mthd = (addr & 0x07ff) << 2;
        u32 subc = (addr & 0x3800) >> 11;
-       u32 data = nv_rd32(priv, 0x104044);
+       u32 data = nv_ro32(falcon, 0x044);
        int chid;
 
        engctx = nouveau_engctx_get(engine, inst);
        chid   = pfifo->chid(pfifo, engctx);
 
        if (stat & 0x00000040) {
-               nv_error(priv, "DISPATCH_ERROR [");
+               nv_error(falcon, "DISPATCH_ERROR [");
                nouveau_enum_print(nva3_copy_isr_error_name, ssta);
                printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
                       chid, inst << 12, subc, mthd, data);
-               nv_wr32(priv, 0x104004, 0x00000040);
+               nv_wo32(falcon, 0x004, 0x00000040);
                stat &= ~0x00000040;
        }
 
        if (stat) {
-               nv_error(priv, "unhandled intr 0x%08x\n", stat);
-               nv_wr32(priv, 0x104004, stat);
+               nv_error(falcon, "unhandled intr 0x%08x\n", stat);
+               nv_wo32(falcon, 0x004, stat);
        }
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nva3_copy_priv *priv;
        int ret;
 
-       ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_engine(priv)->cclass = &nva3_copy_cclass;
        nv_engine(priv)->sclass = nva3_copy_sclass;
        nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+       nv_falcon(priv)->code.data = nva3_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
+       nv_falcon(priv)->data.data = nva3_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
        return 0;
 }
 
-static int
-nva3_copy_init(struct nouveau_object *object)
-{
-       struct nva3_copy_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_copy_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* disable all interrupts */
-       nv_wr32(priv, 0x104014, 0xffffffff);
-
-       /* upload ucode */
-       nv_wr32(priv, 0x1041c0, 0x01000000);
-       for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
-               nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
-
-       nv_wr32(priv, 0x104180, 0x01000000);
-       for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
-               if ((i & 0x3f) == 0)
-                       nv_wr32(priv, 0x104188, i >> 6);
-               nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
-       }
-
-       /* start it running */
-       nv_wr32(priv, 0x10410c, 0x00000000);
-       nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
-       return 0;
-}
-
-static int
-nva3_copy_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nva3_copy_priv *priv = (void *)object;
-
-       nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
-       nv_wr32(priv, 0x104014, 0xffffffff);
-
-       return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nva3_copy_oclass = {
        .handle = NV_ENGINE(COPY0, 0xa3),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nva3_copy_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = nva3_copy_init,
-               .fini = nva3_copy_fini,
+               .dtor = _nouveau_falcon_dtor,
+               .init = _nouveau_falcon_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
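
The reworked nva3_copy_intr() reads the same registers as before, just relative to the falcon base (absolute 0x10401c becomes nv_ro32(falcon, 0x01c)), and keeps the usual masking idiom: a bit survives only if it is pending, enabled in the low half of the dispatch word, and not flagged in the high half. A small self-checking example of that arithmetic, with made-up register values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t dispatch = 0x00400040; /* made up: bit 6 low half, bit 22 high half */
        uint32_t intr     = 0x00000044; /* made up: bits 2 and 6 pending */
        uint32_t stat     = intr & dispatch & ~(dispatch >> 16);

        /* bit 2 is dropped by the enable mask, bit 6 by the high half */
        assert(stat == 0);
        return 0;
}
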
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a87..b3ed273 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/enum.h>
+#include <core/falcon.h>
 #include <core/class.h>
-#include <core/engctx.h>
+#include <core/enum.h>
 
 #include <engine/fifo.h>
 #include <engine/copy.h>
 #include "fuc/nvc0.fuc.h"
 
 struct nvc0_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nvc0_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nvc0_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nvc0_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-                                         256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_ofuncs
 nvc0_copy_context_ofuncs = {
-       .ctor = nvc0_copy_context_ctor,
-       .dtor = _nouveau_copy_context_dtor,
-       .init = _nouveau_copy_context_init,
-       .fini = _nouveau_copy_context_fini,
-       .rd32 = _nouveau_copy_context_rd32,
-       .wr32 = _nouveau_copy_context_wr32,
+       .ctor = _nouveau_falcon_context_ctor,
+       .dtor = _nouveau_falcon_context_dtor,
+       .init = _nouveau_falcon_context_init,
+       .fini = _nouveau_falcon_context_fini,
+       .rd32 = _nouveau_falcon_context_rd32,
+       .wr32 = _nouveau_falcon_context_wr32,
 };
 
 static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
  * PCOPY engine/subdev functions
  ******************************************************************************/
 
-static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
-       { 0x0001, "ILLEGAL_MTHD" },
-       { 0x0002, "INVALID_ENUM" },
-       { 0x0003, "INVALID_BITFIELD" },
-       {}
-};
-
-static void
-nvc0_copy_intr(struct nouveau_subdev *subdev)
+static int
+nvc0_copy_init(struct nouveau_object *object)
 {
-       struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
-       struct nouveau_engine *engine = nv_engine(subdev);
-       struct nouveau_object *engctx;
-       int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)subdev;
-       u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
-       u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
-       u32 stat = intr & disp & ~(disp >> 16);
-       u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
-       u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
-       u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
-       u32 mthd = (addr & 0x07ff) << 2;
-       u32 subc = (addr & 0x3800) >> 11;
-       u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
-       int chid;
-
-       engctx = nouveau_engctx_get(engine, inst);
-       chid   = pfifo->chid(pfifo, engctx);
-
-       if (stat & 0x00000040) {
-               nv_error(priv, "DISPATCH_ERROR [");
-               nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
-               printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-                      chid, (u64)inst << 12, subc, mthd, data);
-               nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
-               stat &= ~0x00000040;
-       }
+       struct nvc0_copy_priv *priv = (void *)object;
+       int ret;
 
-       if (stat) {
-               nv_error(priv, "unhandled intr 0x%08x\n", stat);
-               nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
-       }
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
 
-       nouveau_engctx_put(engctx);
+       nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+       return 0;
 }
 
 static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000100)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00000040;
-       nv_subdev(priv)->intr = nvc0_copy_intr;
+       nv_subdev(priv)->intr = nva3_copy_intr;
        nv_engine(priv)->cclass = &nvc0_copy0_cclass;
        nv_engine(priv)->sclass = nvc0_copy0_sclass;
+       nv_falcon(priv)->code.data = nvc0_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+       nv_falcon(priv)->data.data = nvc0_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
        return 0;
 }
 
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000200)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
+                                   "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00000080;
-       nv_subdev(priv)->intr = nvc0_copy_intr;
+       nv_subdev(priv)->intr = nva3_copy_intr;
        nv_engine(priv)->cclass = &nvc0_copy1_cclass;
        nv_engine(priv)->sclass = nvc0_copy1_sclass;
+       nv_falcon(priv)->code.data = nvc0_pcopy_code;
+       nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
+       nv_falcon(priv)->data.data = nvc0_pcopy_data;
+       nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
        return 0;
 }
 
-static int
-nvc0_copy_init(struct nouveau_object *object)
-{
-       int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_copy_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* disable all interrupts */
-       nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-       /* upload ucode */
-       nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
-       for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-               nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
-
-       nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
-       for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-               if ((i & 0x3f) == 0)
-                       nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
-               nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
-       }
-
-       /* start it running */
-       nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
-       nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
-       nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
-       return 0;
-}
-
-static int
-nvc0_copy_fini(struct nouveau_object *object, bool suspend)
-{
-       int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
-       struct nvc0_copy_priv *priv = (void *)object;
-
-       nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
-       nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
-
-       return nouveau_copy_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nvc0_copy0_oclass = {
        .handle = NV_ENGINE(COPY0, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_copy0_ctor,
-               .dtor = _nouveau_copy_dtor,
+               .dtor = _nouveau_falcon_dtor,
                .init = nvc0_copy_init,
-               .fini = nvc0_copy_fini,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
 
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
        .handle = NV_ENGINE(COPY1, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_copy1_ctor,
-               .dtor = _nouveau_copy_dtor,
+               .dtor = _nouveau_falcon_dtor,
                .init = nvc0_copy_init,
-               .fini = nvc0_copy_fini,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
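
The deleted nvc0_copy_init() makes the consolidation visible: both PCEs uploaded their data and code segments with one and the same loop, differing only in the per-unit register base. A generic uploader over the code/data fields the ctors now fill in might look like the sketch below, distilled from the deleted loops; wr32() is a stub standing in for the MMIO write:

#include <stddef.h>
#include <stdint.h>

static void wr32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }  /* MMIO stub */

static void falcon_upload(uint32_t base,
                          const uint32_t *code, size_t code_size,
                          const uint32_t *data, size_t data_size)
{
        size_t i;

        wr32(base + 0x1c0, 0x01000000);                 /* open data window */
        for (i = 0; i < data_size / 4; i++)
                wr32(base + 0x1c4, data[i]);

        wr32(base + 0x180, 0x01000000);                 /* open code window */
        for (i = 0; i < code_size / 4; i++) {
                if ((i & 0x3f) == 0)
                        wr32(base + 0x188, i >> 6);     /* select 256-byte page */
                wr32(base + 0x184, code[i]);
        }
}

int main(void)
{
        static const uint32_t code[64], data[16];
        falcon_upload(0x104000, code, sizeof(code), data, sizeof(data));
        return 0;
}
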
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c15..dbbe9e8 100644 (file)
 #include <engine/copy.h>
 
 struct nve0_copy_priv {
-       struct nouveau_copy base;
-};
-
-struct nve0_copy_chan {
-       struct nouveau_copy_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
  * PCOPY context
  ******************************************************************************/
 
-static int
-nve0_copy_context_ctor(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass, void *data, u32 size,
-                      struct nouveau_object **pobject)
-{
-       struct nve0_copy_chan *priv;
-       int ret;
-
-       ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
-                                         256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_ofuncs
 nve0_copy_context_ofuncs = {
-       .ctor = nve0_copy_context_ctor,
-       .dtor = _nouveau_copy_context_dtor,
-       .init = _nouveau_copy_context_init,
-       .fini = _nouveau_copy_context_fini,
-       .rd32 = _nouveau_copy_context_rd32,
-       .wr32 = _nouveau_copy_context_wr32,
+       .ctor = _nouveau_engctx_ctor,
+       .dtor = _nouveau_engctx_dtor,
+       .init = _nouveau_engctx_init,
+       .fini = _nouveau_engctx_fini,
+       .rd32 = _nouveau_engctx_rd32,
+       .wr32 = _nouveau_engctx_wr32,
 };
 
 static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000100)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCE0", "copy0", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (nv_rd32(parent, 0x022500) & 0x00000200)
                return -ENODEV;
 
-       ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCE1", "copy1", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
        .handle = NV_ENGINE(COPY0, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_copy0_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = _nouveau_copy_init,
-               .fini = _nouveau_copy_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
 
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
        .handle = NV_ENGINE(COPY1, 0xe0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_copy1_ctor,
-               .dtor = _nouveau_copy_dtor,
-               .init = _nouveau_copy_init,
-               .fini = _nouveau_copy_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b..b974905 100644 (file)
 #include <engine/crypt.h>
 
 struct nv84_crypt_priv {
-       struct nouveau_crypt base;
-};
-
-struct nv84_crypt_chan {
-       struct nouveau_crypt_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv84_crypt_context_ctor(struct nouveau_object *parent,
-                       struct nouveau_object *engine,
-                       struct nouveau_oclass *oclass, void *data, u32 size,
-                       struct nouveau_object **pobject)
-{
-       struct nv84_crypt_chan *priv;
-       int ret;
-
-       ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-                                          0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nv84_crypt_cclass = {
        .handle = NV_ENGCTX(CRYPT, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_crypt_context_ctor,
-               .dtor = _nouveau_crypt_context_dtor,
-               .init = _nouveau_crypt_context_init,
-               .fini = _nouveau_crypt_context_fini,
-               .rd32 = _nouveau_crypt_context_rd32,
-               .wr32 = _nouveau_crypt_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, 0x102130, stat);
        nv_wr32(priv, 0x10200c, 0x10);
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_crypt_priv *priv;
        int ret;
 
-       ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PCRYPT", "crypt", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
        struct nv84_crypt_priv *priv = (void *)object;
        int ret;
 
-       ret = nouveau_crypt_init(&priv->base);
+       ret = nouveau_engine_init(&priv->base);
        if (ret)
                return ret;
 
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
        .handle = NV_ENGINE(CRYPT, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_crypt_ctor,
-               .dtor = _nouveau_crypt_dtor,
+               .dtor = _nouveau_engine_dtor,
                .init = nv84_crypt_init,
-               .fini = _nouveau_crypt_fini,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c..21986f3 100644 (file)
@@ -26,6 +26,7 @@
 #include <core/enum.h>
 #include <core/class.h>
 #include <core/engctx.h>
+#include <core/falcon.h>
 
 #include <subdev/timer.h>
 #include <subdev/fb.h>
 #include "fuc/nv98.fuc.h"
 
 struct nv98_crypt_priv {
-       struct nouveau_crypt base;
-};
-
-struct nv98_crypt_chan {
-       struct nouveau_crypt_chan base;
+       struct nouveau_falcon base;
 };
 
 /*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
  * PCRYPT context
  ******************************************************************************/
 
-static int
-nv98_crypt_context_ctor(struct nouveau_object *parent,
-                       struct nouveau_object *engine,
-                       struct nouveau_oclass *oclass, void *data, u32 size,
-                       struct nouveau_object **pobject)
-{
-       struct nv98_crypt_chan *priv;
-       int ret;
-
-       ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
-                                          256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
 static struct nouveau_oclass
 nv98_crypt_cclass = {
        .handle = NV_ENGCTX(CRYPT, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv98_crypt_context_ctor,
-               .dtor = _nouveau_crypt_context_dtor,
-               .init = _nouveau_crypt_context_init,
-               .fini = _nouveau_crypt_context_fini,
-               .rd32 = _nouveau_crypt_context_rd32,
-               .wr32 = _nouveau_crypt_context_wr32,
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
        },
 };
 
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
                nv_wr32(priv, 0x087004, stat);
        }
 
-       nv50_fb_trap(nouveau_fb(priv), 1);
        nouveau_engctx_put(engctx);
 }
 
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv98_crypt_priv *priv;
        int ret;
 
-       ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
+                                   "PCRYPT", "crypt", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        nv_engine(priv)->cclass = &nv98_crypt_cclass;
        nv_engine(priv)->sclass = nv98_crypt_sclass;
        nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
-       return 0;
-}
-
-static int
-nv98_crypt_init(struct nouveau_object *object)
-{
-       struct nv98_crypt_priv *priv = (void *)object;
-       int ret, i;
-
-       ret = nouveau_crypt_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* wait for exit interrupt to signal */
-       nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
-       nv_wr32(priv, 0x087004, 0x00000010);
-
-       /* upload microcode code and data segments */
-       nv_wr32(priv, 0x087ff8, 0x00100000);
-       for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
-               nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
-
-       nv_wr32(priv, 0x087ff8, 0x00000000);
-       for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
-               nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
-
-       /* start it running */
-       nv_wr32(priv, 0x08710c, 0x00000000);
-       nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
-       nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
+       nv_falcon(priv)->code.data = nv98_pcrypt_code;
+       nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
+       nv_falcon(priv)->data.data = nv98_pcrypt_data;
+       nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
        return 0;
 }
 
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
        .handle = NV_ENGINE(CRYPT, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv98_crypt_ctor,
-               .dtor = _nouveau_crypt_dtor,
-               .init = nv98_crypt_init,
-               .fini = _nouveau_crypt_fini,
+               .dtor = _nouveau_falcon_dtor,
+               .init = _nouveau_falcon_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644 (file)
index 0000000..d0817d9
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+       const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
+                        (data & NV50_DISP_DAC_PWR_VSYNC) |
+                        (data & NV50_DISP_DAC_PWR_DATA) |
+                        (data & NV50_DISP_DAC_PWR_STATE);
+       const u32 doff = (or * 0x800);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+       nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
+       nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+       return 0;
+}
+
+int
+nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+{
+       const u32 doff = (or * 0x800);
+       int load = -EINVAL;
+       nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
+       udelay(9500);
+       nv_wr32(priv, 0x61a00c + doff, 0x80000000);
+       load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
+       nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+       return load;
+}
+
+int
+nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
+       u32 *data = args;
+       int ret;
+
+       if (size < sizeof(u32))
+               return -EINVAL;
+
+       switch (mthd & ~0x3f) {
+       case NV50_DISP_DAC_PWR:
+               ret = priv->dac.power(priv, or, data[0]);
+               break;
+       case NV50_DISP_DAC_LOAD:
+               ret = priv->dac.sense(priv, or, data[0]);
+               if (ret >= 0) {
+                       data[0] = ret;
+                       ret = 0;
+               }
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       return ret;
+}
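
nv50_dac_sense() drives the requested load value onto the DAC for roughly 9.5 ms, then pulls the 3-bit sense result out of bits 27..29 of the 0x61a00c register. That extraction as a self-checking example:

#include <assert.h>
#include <stdint.h>

/* Field extraction used by nv50_dac_sense() above: the 3-bit load
 * status lives in bits 27..29 of register 0x61a00c. */
static int dac_load_status(uint32_t reg)
{
        return (reg & 0x38000000) >> 27;
}

int main(void)
{
        assert(dac_load_status(0x38000000) == 7);       /* all three lines loaded */
        assert(dac_load_status(0x00000000) == 0);       /* nothing connected */
        return 0;
}
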
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644 (file)
index 0000000..373dbcc
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+       const u32 soff = (or * 0x800);
+       int i;
+
+       if (data && data[0]) {
+               for (i = 0; i < size; i++)
+                       nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
+       } else
+       if (data) {
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
+       } else {
+               nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
+       }
+
+       return 0;
+}
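
The ELD upload packs one payload byte per register write, byte offset in bits 8 and up, data in the low byte; the three masks written to 0x61c448 appear to distinguish "ELD valid" (...03), "codec present, no ELD" (...01) and "disabled" (...00), though that reading is an inference from the code, not documented here. The packing as a self-checking example:

#include <assert.h>
#include <stdint.h>

/* One ELD byte per write to 0x61c440: (offset << 8) | payload. */
static uint32_t eld_word(uint32_t offset, uint8_t byte)
{
        return (offset << 8) | byte;
}

int main(void)
{
        assert(eld_word(0, 0x10) == 0x00000010);
        assert(eld_word(5, 0xab) == 0x000005ab);
        return 0;
}
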
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644 (file)
index 0000000..dc57e24
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+int
+nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+{
+       const u32 soff = (or * 0x030);
+       int i;
+
+       if (data && data[0]) {
+               for (i = 0; i < size; i++)
+                       nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
+       } else
+       if (data) {
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
+       } else {
+               nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644 (file)
index 0000000..0d36bdc
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 hoff = (head * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
+       nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
+       nv_wr32(priv, 0x616530 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616534 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616538 + hoff, 0x00000000);
+       nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
+
+       /* Audio InfoFrame */
+       nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
+       nv_wr32(priv, 0x61650c + hoff, 0x00000071);
+       nv_wr32(priv, 0x616510 + hoff, 0x00000000);
+       nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+
+       /* ??? */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+       return 0;
+}
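
The header words written above follow the CEA-861 InfoFrame layout: type in bits 0..7, version in bits 8..15, payload length in bits 16..23. That decodes 0x000d0282 as an AVI InfoFrame (type 0x82, version 2, 13 bytes) and 0x000a0184 as an Audio InfoFrame (type 0x84, version 1, 10 bytes). A self-checking sketch (the helper name is illustrative):

#include <assert.h>
#include <stdint.h>

/* CEA-861 InfoFrame header, packed the way the HDMI blocks above expect. */
static uint32_t infoframe_hdr(uint8_t type, uint8_t version, uint8_t len)
{
        return ((uint32_t)len << 16) | ((uint32_t)version << 8) | type;
}

int main(void)
{
        assert(infoframe_hdr(0x82, 2, 13) == 0x000d0282);       /* AVI */
        assert(infoframe_hdr(0x84, 1, 10) == 0x000a0184);       /* Audio */
        return 0;
}
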
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644 (file)
index 0000000..f065fc2
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 soff = (or * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
+       nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
+       nv_wr32(priv, 0x61c530 + soff, 0x00000000);
+       nv_wr32(priv, 0x61c534 + soff, 0x00000000);
+       nv_wr32(priv, 0x61c538 + soff, 0x00000000);
+       nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
+
+       /* Audio InfoFrame */
+       nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
+       nv_wr32(priv, 0x61c50c + soff, 0x00000071);
+       nv_wr32(priv, 0x61c510 + soff, 0x00000000);
+       nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
+
+       /* ??? */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+       nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+       nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644 (file)
index 0000000..5151bb2
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include "nv50.h"
+
+int
+nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+{
+       const u32 hoff = (head * 0x800);
+
+       if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+               nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
+               nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+               nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+               return 0;
+       }
+
+       /* AVI InfoFrame */
+       nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
+       nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
+       nv_wr32(priv, 0x616724 + hoff, 0x00000000);
+       nv_wr32(priv, 0x616728 + hoff, 0x00000000);
+       nv_wr32(priv, 0x61672c + hoff, 0x00000000);
+       nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
+
+       /* ??? InfoFrame? */
+       nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
+       nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
+       nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
+
+       /* HDMI_CTRL */
+       nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+
+       /* NFI, audio doesn't work without it though.. */
+       nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c..0f09af1 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nv50_disp_priv {
-       struct nouveau_disp base;
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO channel base class
+ ******************************************************************************/
+
+int
+nv50_disp_chan_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       struct nv50_disp_base *base = (void *)parent;
+       struct nv50_disp_chan *chan;
+       int ret;
+
+       if (base->chan & (1 << chid))
+               return -EBUSY;
+       base->chan |= (1 << chid);
+
+       ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+                                    (1ULL << NVDEV_ENGINE_DMAOBJ),
+                                    length, pobject);
+       chan = *pobject;
+       if (ret)
+               return ret;
+
+       chan->chid = chid;
+       return 0;
+}
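
nv50_disp_chan_create_() claims EVO channel ids out of a single bitmask on the display object, so opening the same channel twice fails with -EBUSY; nv50_disp_chan_destroy() below returns the bit. The bookkeeping in miniature:

#include <assert.h>

static unsigned int chan_mask;  /* one bit per EVO channel id */

static int chan_alloc(int chid)
{
        if (chan_mask & (1u << chid))
                return -1;      /* -EBUSY in the driver */
        chan_mask |= 1u << chid;
        return 0;
}

int main(void)
{
        assert(chan_alloc(0) == 0);
        assert(chan_alloc(0) == -1);    /* already taken */
        return 0;
}
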
+
+void
+nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
+{
+       struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
+       base->chan &= ~(1 << chan->chid);
+       nouveau_namedb_destroy(&chan->base);
+}
+
+u32
+nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_chan *chan = (void *)object;
+       return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
+}
+
+void
+nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_chan *chan = (void *)object;
+       nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
+}
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_dmac_object_attach(struct nouveau_object *parent,
+                            struct nouveau_object *object, u32 name)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       struct nv50_disp_chan *chan = (void *)parent;
+       u32 addr = nv_gpuobj(object)->node->offset;
+       u32 chid = chan->chid;
+       u32 data = (chid << 28) | (addr << 10) | chid;
+       return nouveau_ramht_insert(base->ramht, chid, name, data);
+}
+
+static void
+nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       nouveau_ramht_remove(base->ramht, cookie);
+}
+
+int
+nv50_disp_dmac_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+                      int length, void **pobject)
+{
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                    length, pobject);
+       dmac = *pobject;
+       if (ret)
+               return ret;
+
+       dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+       if (!dmac->pushdma)
+               return -ENOENT;
+
+       switch (nv_mclass(dmac->pushdma)) {
+       case 0x0002:
+       case 0x003d:
+               if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
+                       return -EINVAL;
+
+               switch (dmac->pushdma->target) {
+               case NV_MEM_TARGET_VRAM:
+                       dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
+                       break;
+               case NV_MEM_TARGET_PCI_NOSNOOP:
+                       dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
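
The pushbuf validation above accepts only a 4 KiB DMA object (limit - start == 0xfff) and folds it into one word: the start address shifted down by 8, with the low bits selecting the aperture (0 = VRAM, 3 = PCI non-snooped). The encoding as a self-checking example:

#include <assert.h>
#include <stdint.h>

/* Push-address word as built in nv50_disp_dmac_create_() above. */
static uint32_t dmac_push_addr(uint64_t start, uint32_t target_bits)
{
        return target_bits | (uint32_t)(start >> 8);
}

int main(void)
{
        assert(dmac_push_addr(0x20000000, 0) == 0x00200000);    /* VRAM */
        assert(dmac_push_addr(0x20000000, 3) == 0x00200003);    /* PCI */
        return 0;
}
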
+
+void
+nv50_disp_dmac_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_dmac *dmac = (void *)object;
+       nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
+       nv50_disp_chan_destroy(&dmac->base);
+}
+
+static int
+nv50_disp_dmac_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&dmac->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
+       nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
+       nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+       nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
+               nv_error(dmac, "init timeout, 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
+       nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+               nv_error(dmac, "fini timeout, 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+
+       return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nv50_disp_mast_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_mast_class *args = data;
+       struct nv50_disp_dmac *mast;
+       int ret;
+
+       if (size < sizeof(*args))
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    0, sizeof(*mast), (void **)&mast);
+       *pobject = nv_object(mast);
+       if (ret)
+               return ret;
+
+       nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+static int
+nv50_disp_mast_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+       int ret;
+
+       ret = nv50_disp_chan_init(&mast->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
+
+       /* attempt to unstick channel from some unknown state */
+       if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
+               nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
+       if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
+               nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610204, mast->push);
+       nv_wr32(priv, 0x610208, 0x00010000);
+       nv_wr32(priv, 0x61020c, 0x00000000);
+       nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000, 0x00000000);
+       nv_wr32(priv, 0x610200, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
+               nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
+       nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
+               nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
+
+       return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_mast_ofuncs = {
+       .ctor = nv50_disp_mast_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_mast_init,
+       .fini = nv50_disp_mast_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_sync_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_sync_class *args = data;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    1 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_sync_ofuncs = {
+       .ctor = nv50_disp_sync_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_dmac_init,
+       .fini = nv50_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_ovly_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_ovly_class *args = data;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    3 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_ovly_ofuncs = {
+       .ctor = nv50_disp_ovly_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nv50_disp_dmac_init,
+       .fini = nv50_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nv50_disp_pioc_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       return nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                     length, pobject);
+}
+
+static void
+nv50_disp_pioc_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_pioc *pioc = (void *)object;
+       nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nv50_disp_pioc_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&pioc->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
+               nv_error(pioc, "timeout0: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
+               nv_error(pioc, "timeout1: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+
+       nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+       if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
+               nv_error(pioc, "timeout: 0x%08x\n",
+                        nv_rd32(priv, 0x610200 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_oimm_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_oimm_class *args = data;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
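+       /* channel indices appear fixed across this file: 0 master,
+        * 1-2 sync, 3-4 overlay, 5-6 immediate overlay, 7-8 cursor
+        */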
+       ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_oimm_ofuncs = {
+       .ctor = nv50_disp_oimm_ctor,
+       .dtor = nv50_disp_pioc_dtor,
+       .init = nv50_disp_pioc_init,
+       .fini = nv50_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nv50_disp_curs_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_curs_class *args = data;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head > 1)
+               return -EINVAL;
+
+       ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nv50_disp_curs_ofuncs = {
+       .ctor = nv50_disp_curs_ctor,
+       .dtor = nv50_disp_pioc_dtor,
+       .init = nv50_disp_pioc_init,
+       .fini = nv50_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nv50_disp_base_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_base *base;
+       int ret;
+
+       ret = nouveau_parent_create(parent, engine, oclass, 0,
+                                   priv->sclass, 0, &base);
+       *pobject = nv_object(base);
+       if (ret)
+               return ret;
+
+       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nv50_disp_base_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_base *base = (void *)object;
+       nouveau_ramht_ref(NULL, &base->ramht);
+       nouveau_parent_destroy(&base->base);
+}
+
+static int
+nv50_disp_base_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+       int ret, i;
+       u32 tmp;
+
+       ret = nouveau_parent_init(&base->base);
+       if (ret)
+               return ret;
+
+       /* The below segments of code copying values from one register to
+        * another appear to inform EVO of the display capabilities or
+        * something similar.  It is not clear what the 0x614004 caps are for.
+        */
+       tmp = nv_rd32(priv, 0x614004);
+       nv_wr32(priv, 0x610184, tmp);
+
+       /* ... CRTC caps */
+       for (i = 0; i < priv->head.nr; i++) {
+               tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
+               nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+               nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+               nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
+               tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+               nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
+       }
+
+       /* ... DAC caps */
+       for (i = 0; i < priv->dac.nr; i++) {
+               tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+               nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
+       }
+
+       /* ... SOR caps */
+       for (i = 0; i < priv->sor.nr; i++) {
+               tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+               nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
+       }
+
+       /* ... EXT caps */
+       for (i = 0; i < 3; i++) {
+               tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
+               nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
+       }
+
+       /* steal display away from vbios, or something like that */
+       if (nv_rd32(priv, 0x610024) & 0x00000100) {
+               nv_wr32(priv, 0x610024, 0x00000100);
+               nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+               if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+                       nv_error(priv, "timeout acquiring display\n");
+                       return -EBUSY;
+               }
+       }
+
+       /* point at display engine memory area (hash table, objects) */
+       nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
+
+       /* enable supervisor interrupts, disable everything else */
+       nv_wr32(priv, 0x61002c, 0x00000370);
+       nv_wr32(priv, 0x610028, 0x00000000);
+       return 0;
+}
+
+static int
+nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+
+       /* disable all interrupts */
+       nv_wr32(priv, 0x610024, 0x00000000);
+       nv_wr32(priv, 0x610020, 0x00000000);
+
+       return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nv50_disp_base_ofuncs = {
+       .ctor = nv50_disp_base_ctor,
+       .dtor = nv50_disp_base_dtor,
+       .init = nv50_disp_base_init,
+       .fini = nv50_disp_base_fini,
+};
+
+static struct nouveau_omthds
+nv50_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv50_disp_base_oclass[] = {
+       { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+       {}
 };
 
 static struct nouveau_oclass
 nv50_disp_sclass[] = {
-       {},
+       { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+/*******************************************************************************
+ * Display context, tracks instmem allocation and prevents more than one
+ * client using the display hardware at any time.
+ ******************************************************************************/
+
+static int
+nv50_disp_data_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nouveau_engctx *ectx;
+       int ret = -EBUSY;
+
+       /* no context needed for channel objects... */
+       if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+               atomic_inc(&parent->refcount);
+               *pobject = parent;
+               return 0;
+       }
+
+       /* allocate display hardware to client */
+       mutex_lock(&nv_subdev(priv)->mutex);
+       if (list_empty(&nv_engine(priv)->contexts)) {
+               ret = nouveau_engctx_create(parent, engine, oclass, NULL,
+                                           0x10000, 0x10000,
+                                           NVOBJ_FLAG_HEAP, &ectx);
+               *pobject = nv_object(ectx);
+       }
+       mutex_unlock(&nv_subdev(priv)->mutex);
+       return ret;
+}
+
+struct nouveau_oclass
+nv50_disp_cclass = {
+       .handle = NV_ENGCTX(DISP, 0x50),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_disp_data_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
+       },
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv50_disp_intr_error(struct nv50_disp_priv *priv)
+{
+       u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
+       u32 addr, data;
+       int chid;
+
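+       /* one error bit per dma channel; log the offending method and
+        * data before acknowledging the interrupt
+        */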
+       for (chid = 0; chid < 5; chid++) {
+               if (!(channels & (1 << chid)))
+                       continue;
+
+               nv_wr32(priv, 0x610020, 0x00010000 << chid);
+               addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
+               data = nv_rd32(priv, 0x610084 + (chid * 0x08));
+               nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
+
+               nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
+                        chid, addr & 0xffc, data, addr);
+       }
+}
+
 static void
 nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
@@ -80,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
                disp->vblank.notify(disp->vblank.data, crtc);
 }
 
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+           struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+           struct nvbios_outp *info)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       u16 mask, type, data;
+
+       if (outp < 4) {
+               type = DCB_OUTPUT_ANALOG;
+               mask = 0;
+       } else {
+               outp -= 4;
+               switch (ctrl & 0x00000f00) {
+               case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+               case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+               case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+               case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+               case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+               case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+               default:
+                       nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+                       return 0x0000;
+               }
+       }
+
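+       /* the match mask appears to encode the sor link in bits 6:7,
+        * the output index in the low bits, and the head in bits 8:9
+        */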
+       mask  = 0x00c0 & (mask << 6);
+       mask |= 0x0001 << outp;
+       mask |= 0x0100 << head;
+
+       data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+       if (!data)
+               return 0x0000;
+
+       return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int id)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data;
+       u32 ctrl = 0x00000000;
+       int i;
+
+       for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+               ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
+
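+       /* the ctrl words appear grouped with the DACs first and the
+        * SORs at a chipset-dependent offset; i += 3 below keeps the
+        * combined output index used by exec_lookup()
+        */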
+       if (nv_device(priv)->chipset  < 0x90 ||
+           nv_device(priv)->chipset == 0x92 ||
+           nv_device(priv)->chipset == 0xa0) {
+               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                       ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
+               i += 3;
+       } else {
+               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                       ctrl = nv_rd32(priv, 0x610798 + (i * 8));
+               i += 3;
+       }
+
+       if (!(ctrl & (1 << head)))
+               return false;
+
+       data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+       if (data) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[id],
+                       .outp = &dcb,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               return nvbios_exec(&init) == 0;
+       }
+
+       return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
+           struct dcb_output *outp)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info1;
+       struct nvbios_ocfg info2;
+       u8  ver, hdr, cnt, len;
+       u16 data, conf;
+       u32 ctrl = 0x00000000;
+       int i;
+
+       for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+               ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
+
+       if (nv_device(priv)->chipset  < 0x90 ||
+           nv_device(priv)->chipset == 0x92 ||
+           nv_device(priv)->chipset == 0xa0) {
+               for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
+                       ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
+               i += 3;
+       } else {
+               for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
+                       ctrl = nv_rd32(priv, 0x610794 + (i * 8));
+               i += 3;
+       }
+
+       if (!(ctrl & (1 << head)))
+               return 0x0000;
+
+       data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
+       if (!data)
+               return 0x0000;
+
+       switch (outp->type) {
+       case DCB_OUTPUT_TMDS:
+               conf = (ctrl & 0x00000f00) >> 8;
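+               /* single-link TMDS tops out at a 165MHz pixel clock,
+                * so presumably this flags dual-link above that
+                */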
+               if (pclk >= 165000)
+                       conf |= 0x0100;
+               break;
+       case DCB_OUTPUT_LVDS:
+               conf = priv->sor.lvdsconf;
+               break;
+       case DCB_OUTPUT_DP:
+               conf = (ctrl & 0x00000f00) >> 8;
+               break;
+       case DCB_OUTPUT_ANALOG:
+       default:
+               conf = 0x00ff;
+               break;
+       }
+
+       data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+       if (data) {
+               data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+               if (data) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = data,
+                               .outp = outp,
+                               .crtc = head,
+                               .execute = 1,
+                       };
+
+                       if (nvbios_exec(&init))
+                               return 0x0000;
+                       return conf;
+               }
+       }
+
+       return 0x0000;
+}
+
+static void
+nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+{
+       int head = ffs((super & 0x00000060) >> 5) - 1;
+       if (head >= 0) {
+               head = ffs((super & 0x00000180) >> 7) - 1;
+               if (head >= 0)
+                       exec_script(priv, head, 1);
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000010);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+                       struct dcb_output *outp, u32 pclk)
+{
+       const int link = !(outp->sorconf.link & 1);
+       const int   or = ffs(outp->or) - 1;
+       const u32 soff = (  or * 0x800);
+       const u32 loff = (link * 0x080) + soff;
+       const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
+       const u32 symbol = 100000;
+       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+       u32 clksor = nv_rd32(priv, 0x614300 + soff);
+       int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+       int TU, VTUi, VTUf, VTUa;
+       u64 link_data_rate, link_ratio, unk;
+       u32 best_diff = 64 * symbol;
+       u32 link_nr, link_bw, bits, r;
+
+       /* calculate packed data rate for each lane */
+       if      (dpctrl > 0x00030000) link_nr = 4;
+       else if (dpctrl > 0x00010000) link_nr = 2;
+       else                          link_nr = 1;
+
+       if (clksor & 0x000c0000)
+               link_bw = 270000;
+       else
+               link_bw = 162000;
+
+       if      ((ctrl & 0xf0000) == 0x60000) bits = 30;
+       else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
+       else                                  bits = 18;
+
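+       /* e.g. a 148500kHz (1080p60) pixel clock at 24bpp over four
+        * lanes gives 148500 * 24 / 8 / 4 = 111375 units per lane
+        */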
+       link_data_rate = (pclk * bits / 8) / link_nr;
+
+       /* calculate ratio of packed data rate to link symbol rate */
+       link_ratio = link_data_rate * symbol;
+       r = do_div(link_ratio, link_bw);
+
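+       /* scan TU sizes from 64 down to 32 for the hw fraction
+        * (VTUi/VTUf/VTUa) that comes closest to the ideal ratio
+        */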
+       for (TU = 64; TU >= 32; TU--) {
+               /* calculate average number of valid symbols in each TU */
+               u32 tu_valid = link_ratio * TU;
+               u32 calc, diff;
+
+               /* find a hw representation for the fraction.. */
+               VTUi = tu_valid / symbol;
+               calc = VTUi * symbol;
+               diff = tu_valid - calc;
+               if (diff) {
+                       if (diff >= (symbol / 2)) {
+                               VTUf = symbol / (symbol - diff);
+                               if (symbol - (VTUf * diff))
+                                       VTUf++;
+
+                               if (VTUf <= 15) {
+                                       VTUa  = 1;
+                                       calc += symbol - (symbol / VTUf);
+                               } else {
+                                       VTUa  = 0;
+                                       VTUf  = 1;
+                                       calc += symbol;
+                               }
+                       } else {
+                               VTUa  = 0;
+                               VTUf  = min((int)(symbol / diff), 15);
+                               calc += symbol / VTUf;
+                       }
+
+                       diff = calc - tu_valid;
+               } else {
+                       /* no remainder, but the hw doesn't like the fractional
+                        * part to be zero.  decrement the integer part and
+                        * have the fraction add a whole symbol back
+                        */
+                       VTUa = 0;
+                       VTUf = 1;
+                       VTUi--;
+               }
+
+               if (diff < best_diff) {
+                       best_diff = diff;
+                       bestTU = TU;
+                       bestVTUa = VTUa;
+                       bestVTUf = VTUf;
+                       bestVTUi = VTUi;
+                       if (diff == 0)
+                               break;
+               }
+       }
+
+       if (!bestTU) {
+               nv_error(priv, "unable to find suitable dp config\n");
+               return;
+       }
+
+       /* XXX close to vbios numbers, but not right */
+       unk  = (symbol - link_ratio) * bestTU;
+       unk *= link_ratio;
+       r = do_div(unk, symbol);
+       r = do_div(unk, symbol);
+       unk += 6;
+
+       nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
+       nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
+                                                  bestVTUf << 16 |
+                                                  bestVTUi << 8 | unk);
+}
+
+static void
+nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+{
+       struct dcb_output outp;
+       u32 addr, mask, data;
+       int head;
+
+       /* finish detaching encoder? */
+       head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0)
+               exec_script(priv, head, 2);
+
+       /* check whether a vpll change is required */
+       head = ffs((super & 0x00000600) >> 9) - 1;
+       if (head >= 0) {
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               if (pclk) {
+                       struct nouveau_clock *clk = nouveau_clock(priv);
+                       clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+               }
+
+               nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
+       }
+
+       /* (re)attach the relevant OR to the head */
+       head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0) {
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
+               if (conf) {
+                       if (outp.type == DCB_OUTPUT_ANALOG) {
+                               addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+                               mask = 0xffffffff;
+                               data = 0x00000000;
+                       } else {
+                               if (outp.type == DCB_OUTPUT_DP)
+                                       nv50_disp_intr_unk20_dp(priv, &outp, pclk);
+                               addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+                               mask = 0x00000707;
+                               data = (conf & 0x0100) ? 0x0101 : 0x0000;
+                       }
+
+                       nv_mask(priv, addr, mask, data);
+               }
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000020);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const int link = !(outp->sorconf.link & 1);
+       const int   or = ffs(outp->or) - 1;
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u16 mask = (outp->sorconf.link << 6) | outp->or;
+       u8  ver, hdr;
+
+       if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
+               nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
+}
+
 static void
+nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+{
+       int head = ffs((super & 0x00000180) >> 7) - 1;
+       if (head >= 0) {
+               struct dcb_output outp;
+               u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+               if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
+                       if (outp.type == DCB_OUTPUT_TMDS)
+                               nv50_disp_intr_unk40_tmds(priv, &outp);
+               }
+       }
+
+       nv_wr32(priv, 0x610024, 0x00000040);
+       nv_wr32(priv, 0x610030, 0x80000000);
+}
+
+static void
+nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+{
+       u32 super = nv_rd32(priv, 0x610030);
+
+       nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+
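+       /* modesets appear to be a three-stage supervisor handshake:
+        * detach (unk10), reclock and reattach (unk20), final encoder
+        * tweaks (unk40)
+        */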
+       if (intr1 & 0x00000010)
+               nv50_disp_intr_unk10(priv, super);
+       if (intr1 & 0x00000020)
+               nv50_disp_intr_unk20(priv, super);
+       if (intr1 & 0x00000040)
+               nv50_disp_intr_unk40(priv, super);
+}
+
+void
 nv50_disp_intr(struct nouveau_subdev *subdev)
 {
        struct nv50_disp_priv *priv = (void *)subdev;
-       u32 stat1 = nv_rd32(priv, 0x610024);
+       u32 intr0 = nv_rd32(priv, 0x610020);
+       u32 intr1 = nv_rd32(priv, 0x610024);
 
-       if (stat1 & 0x00000004) {
+       if (intr0 & 0x001f0000) {
+               nv50_disp_intr_error(priv);
+               intr0 &= ~0x001f0000;
+       }
+
+       if (intr1 & 0x00000004) {
                nv50_disp_intr_vblank(priv, 0);
                nv_wr32(priv, 0x610024, 0x00000004);
-               stat1 &= ~0x00000004;
+               intr1 &= ~0x00000004;
        }
 
-       if (stat1 & 0x00000008) {
+       if (intr1 & 0x00000008) {
                nv50_disp_intr_vblank(priv, 1);
                nv_wr32(priv, 0x610024, 0x00000008);
-               stat1 &= ~0x00000008;
+               intr1 &= ~0x00000008;
        }
 
+       if (intr1 & 0x00000070) {
+               nv50_disp_intr_super(priv, intr1);
+               intr1 &= ~0x00000070;
+       }
 }
 
 static int
 nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
 {
        struct nv50_disp_priv *priv;
        int ret;
@@ -114,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nv50_disp_sclass;
+       nv_engine(priv)->sclass = nv50_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv50_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
 
        INIT_LIST_HEAD(&priv->base.vblank.list);
        spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644 (file)
index 0000000..a6bb931
--- /dev/null
@@ -0,0 +1,142 @@
+#ifndef __NV50_DISP_H__
+#define __NV50_DISP_H__
+
+#include <core/parent.h>
+#include <core/namedb.h>
+#include <core/ramht.h>
+
+#include <engine/dmaobj.h>
+#include <engine/disp.h>
+
+struct dcb_output;
+
+struct nv50_disp_priv {
+       struct nouveau_disp base;
+       struct nouveau_oclass *sclass;
+       struct {
+               int nr;
+       } head;
+       struct {
+               int nr;
+               int (*power)(struct nv50_disp_priv *, int dac, u32 data);
+               int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+       } dac;
+       struct {
+               int nr;
+               int (*power)(struct nv50_disp_priv *, int sor, u32 data);
+               int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
+               int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+               int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
+                                    int head, u16 type, u16 mask, u32 data,
+                                    struct dcb_output *);
+               int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
+                                    int head, u16 type, u16 mask, u32 data,
+                                    struct dcb_output *);
+               int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
+                               u16 type, u16 mask, u32 data,
+                               struct dcb_output *);
+               int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
+                                int head, u16 type, u16 mask, u32 data,
+                                struct dcb_output *);
+               int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
+                                int lane, u16 type, u16 mask, u32 data,
+                                struct dcb_output *);
+               u32 lvdsconf;
+       } sor;
+};
+
+#define DAC_MTHD(n) (n), (n) + 0x03
+
+int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_dac_power(struct nv50_disp_priv *, int, u32);
+int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+
+#define SOR_MTHD(n) (n), (n) + 0x3f
+
+int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+
+int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+
+int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+
+int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
+                          u32, struct dcb_output *);
+int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
+                          u32, struct dcb_output *);
+int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+                     struct dcb_output *);
+int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+
+int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
+                     struct dcb_output *);
+int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
+                      struct dcb_output *);
+
+struct nv50_disp_base {
+       struct nouveau_parent base;
+       struct nouveau_ramht *ramht;
+       u32 chan;
+};
+
+struct nv50_disp_chan {
+       struct nouveau_namedb base;
+       int chid;
+};
+
+int  nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, int, int, void **);
+void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+u32  nv50_disp_chan_rd32(struct nouveau_object *, u64);
+void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
+
+#define nv50_disp_chan_init(a)                                                 \
+       nouveau_namedb_init(&(a)->base)
+#define nv50_disp_chan_fini(a,b)                                               \
+       nouveau_namedb_fini(&(a)->base, (b))
+
+int  nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
+                           struct nouveau_oclass *, u32, int, int, void **);
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
+struct nv50_disp_dmac {
+       struct nv50_disp_chan base;
+       struct nouveau_dmaobj *pushdma;
+       u32 push;
+};
+
+struct nv50_disp_pioc {
+       struct nv50_disp_chan base;
+};
+
+extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+extern struct nouveau_oclass nv50_disp_cclass;
+void nv50_disp_intr(struct nouveau_subdev *);
+
+extern struct nouveau_omthds nv84_disp_base_omthds[];
+
+extern struct nouveau_omthds nva3_disp_base_omthds[];
+
+extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_oclass nvd0_disp_cclass;
+void nvd0_disp_intr(struct nouveau_subdev *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644 (file)
index 0000000..fc84eac
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv84_disp_sclass[] = {
+       { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+struct nouveau_omthds
+nv84_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv84_disp_base_oclass[] = {
+       { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+       {}
+};
+
+static int
+nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nv84_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv84_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nv84_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x82),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv84_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644 (file)
index 0000000..ba9dfd4
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nv94_disp_sclass[] = {
+       { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_omthds
+nv94_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nv94_disp_base_oclass[] = {
+       { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+       {}
+};
+
+static int
+nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nv94_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nv94_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+       priv->sor.dp_train = nv94_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nv94_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x88),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv94_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644 (file)
index 0000000..5d63902
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva0_disp_sclass[] = {
+       { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_oclass
+nva0_disp_base_oclass[] = {
+       { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+       {}
+};
+
+static int
+nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nva0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nva0_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 2;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hdmi = nv84_hdmi_ctrl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nva0_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x83),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nva0_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644 (file)
index 0000000..e9192ca
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nva3_disp_sclass[] = {
+       { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
+       { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
+       { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
+       { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
+       { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+       {}
+};
+
+struct nouveau_omthds
+nva3_disp_base_omthds[] = {
+       { SOR_MTHD(NV50_DISP_SOR_PWR)         , nv50_sor_mthd },
+       { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD)     , nv50_sor_mthd },
+       { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR)    , nv50_sor_mthd },
+       { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN)    , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL)   , nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
+       { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
+       { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+       {},
+};
+
+static struct nouveau_oclass
+nva3_disp_base_oclass[] = {
+       { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
+};
+
+static int
+nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nva3_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nv50_disp_intr;
+       priv->sclass = nva3_disp_sclass;
+       priv->head.nr = 2;
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nva3_hda_eld;
+       priv->sor.hdmi = nva3_hdmi_ctrl;
+       priv->sor.dp_train = nv94_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nva3_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x85),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nva3_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
index d93efbc..9e38ebf 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <subdev/bar.h>
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/software.h>
 #include <engine/disp.h>
 
-struct nvd0_disp_priv {
-       struct nouveau_disp base;
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/bar.h>
+#include <subdev/clock.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/pll.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * EVO DMA channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
+                            struct nouveau_object *object, u32 name)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       struct nv50_disp_chan *chan = (void *)parent;
+       u32 addr = nv_gpuobj(object)->node->offset;
+       u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
+       return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
+}
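
[Editor's note] The data word built above packs the owning channel id, the object's instance-node offset and a valid bit into a single 32-bit RAMHT entry. A minimal standalone sketch of that packing, assuming only the field layout implied by the shifts (the helper name is ours, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Field layout inferred from nvd0_disp_dmac_object_attach();
 * illustration only, not an official description of the format. */
static uint32_t evo_ramht_data(uint32_t chid, uint32_t addr)
{
	return (chid << 27) | (addr << 9) | 0x00000001;
}

int main(void)
{
	/* e.g. channel 1, object node offset 0x10 -> 0x08002001 */
	printf("data = 0x%08x\n", evo_ramht_data(1, 0x10));
	return 0;
}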
+
+static void
+nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
+{
+       struct nv50_disp_base *base = (void *)parent->parent;
+       nouveau_ramht_remove(base->ramht, cookie);
+}
+
+static int
+nvd0_disp_dmac_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&dmac->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
+       nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
+       nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
+       nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
+               nv_error(dmac, "init: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *dmac = (void *)object;
+       int chid = dmac->base.chid;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
+       nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
+               nv_error(dmac, "fini: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+       return nv50_disp_chan_fini(&dmac->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO master channel object
+ ******************************************************************************/
+
+static int
+nvd0_disp_mast_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_mast_class *args = data;
+       struct nv50_disp_dmac *mast;
+       int ret;
+
+       if (size < sizeof(*args))
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    0, sizeof(*mast), (void **)&mast);
+       *pobject = nv_object(mast);
+       if (ret)
+               return ret;
+
+       nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+static int
+nvd0_disp_mast_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+       int ret;
+
+       ret = nv50_disp_chan_init(&mast->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
+       nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
+
+       /* initialise channel for dma command submission */
+       nv_wr32(priv, 0x610494, mast->push);
+       nv_wr32(priv, 0x610498, 0x00010000);
+       nv_wr32(priv, 0x61049c, 0x00000001);
+       nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
+       nv_wr32(priv, 0x640000, 0x00000000);
+       nv_wr32(priv, 0x610490, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
+               nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_dmac *mast = (void *)object;
+
+       /* deactivate channel */
+       nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
+       nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
+       if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
+               nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
+
+       return nv50_disp_chan_fini(&mast->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_mast_ofuncs = {
+       .ctor = nvd0_disp_mast_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_mast_init,
+       .fini = nvd0_disp_mast_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO sync channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_sync_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_sync_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    1 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_sync_ofuncs = {
+       .ctor = nvd0_disp_sync_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_dmac_init,
+       .fini = nvd0_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_ovly_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_ovly_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_dmac *dmac;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+                                    5 + args->head, sizeof(*dmac),
+                                    (void **)&dmac);
+       *pobject = nv_object(dmac);
+       if (ret)
+               return ret;
+
+       nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
+       nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
+       return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_ovly_ofuncs = {
+       .ctor = nvd0_disp_ovly_ctor,
+       .dtor = nv50_disp_dmac_dtor,
+       .init = nvd0_disp_dmac_init,
+       .fini = nvd0_disp_dmac_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO PIO channel base class
+ ******************************************************************************/
+
+static int
+nvd0_disp_pioc_create_(struct nouveau_object *parent,
+                      struct nouveau_object *engine,
+                      struct nouveau_oclass *oclass, int chid,
+                      int length, void **pobject)
+{
+       return nv50_disp_chan_create_(parent, engine, oclass, chid,
+                                     length, pobject);
+}
+
+static void
+nvd0_disp_pioc_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_pioc *pioc = (void *)object;
+       nv50_disp_chan_destroy(&pioc->base);
+}
+
+static int
+nvd0_disp_pioc_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+       int ret;
+
+       ret = nv50_disp_chan_init(&pioc->base);
+       if (ret)
+               return ret;
+
+       /* enable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* activate channel */
+       nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
+               nv_error(pioc, "init: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int
+nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_pioc *pioc = (void *)object;
+       int chid = pioc->base.chid;
+
+       nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+       if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
+               nv_error(pioc, "timeout: 0x%08x\n",
+                        nv_rd32(priv, 0x610490 + (chid * 0x10)));
+               if (suspend)
+                       return -EBUSY;
+       }
+
+       /* disable error reporting */
+       nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
+       nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
+
+       return nv50_disp_chan_fini(&pioc->base, suspend);
+}
+
+/*******************************************************************************
+ * EVO immediate overlay channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_oimm_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_oimm_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_oimm_ofuncs = {
+       .ctor = nvd0_disp_oimm_ctor,
+       .dtor = nvd0_disp_pioc_dtor,
+       .init = nvd0_disp_pioc_init,
+       .fini = nvd0_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * EVO cursor channel objects
+ ******************************************************************************/
+
+static int
+nvd0_disp_curs_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_display_curs_class *args = data;
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_pioc *pioc;
+       int ret;
+
+       if (size < sizeof(*args) || args->head >= priv->head.nr)
+               return -EINVAL;
+
+       ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
+                                    sizeof(*pioc), (void **)&pioc);
+       *pobject = nv_object(pioc);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+struct nouveau_ofuncs
+nvd0_disp_curs_ofuncs = {
+       .ctor = nvd0_disp_curs_ctor,
+       .dtor = nvd0_disp_pioc_dtor,
+       .init = nvd0_disp_pioc_init,
+       .fini = nvd0_disp_pioc_fini,
+       .rd32 = nv50_disp_chan_rd32,
+       .wr32 = nv50_disp_chan_wr32,
+};
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static int
+nvd0_disp_base_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv = (void *)engine;
+       struct nv50_disp_base *base;
+       int ret;
+
+       ret = nouveau_parent_create(parent, engine, oclass, 0,
+                                   priv->sclass, 0, &base);
+       *pobject = nv_object(base);
+       if (ret)
+               return ret;
+
+       return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
+}
+
+static void
+nvd0_disp_base_dtor(struct nouveau_object *object)
+{
+       struct nv50_disp_base *base = (void *)object;
+       nouveau_ramht_ref(NULL, &base->ramht);
+       nouveau_parent_destroy(&base->base);
+}
+
+static int
+nvd0_disp_base_init(struct nouveau_object *object)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+       int ret, i;
+       u32 tmp;
+
+       ret = nouveau_parent_init(&base->base);
+       if (ret)
+               return ret;
+
+       /* The register-copy sequences below appear to inform EVO of the
+        * display capabilities, or something similar.
+        */
+
+       /* ... CRTC caps */
+       for (i = 0; i < priv->head.nr; i++) {
+               tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
+               nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
+               tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
+               nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
+               tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
+               nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
+       }
+
+       /* ... DAC caps */
+       for (i = 0; i < priv->dac.nr; i++) {
+               tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
+               nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
+       }
+
+       /* ... SOR caps */
+       for (i = 0; i < priv->sor.nr; i++) {
+               tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
+               nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
+       }
+
+       /* steal display away from vbios, or something like that */
+       if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
+               nv_wr32(priv, 0x6100ac, 0x00000100);
+               nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
+               if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
+                       nv_error(priv, "timeout acquiring display\n");
+                       return -EBUSY;
+               }
+       }
+
+       /* point at display engine memory area (hash table, objects) */
+       nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
+
+       /* enable supervisor interrupts, disable everything else */
+       nv_wr32(priv, 0x610090, 0x00000000);
+       nv_wr32(priv, 0x6100a0, 0x00000000);
+       nv_wr32(priv, 0x6100b0, 0x00000307);
+
+       return 0;
+}
+
+static int
+nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nv50_disp_base *base = (void *)object;
+
+       /* disable all interrupts */
+       nv_wr32(priv, 0x6100b0, 0x00000000);
+
+       return nouveau_parent_fini(&base->base, suspend);
+}
+
+struct nouveau_ofuncs
+nvd0_disp_base_ofuncs = {
+       .ctor = nvd0_disp_base_ctor,
+       .dtor = nvd0_disp_base_dtor,
+       .init = nvd0_disp_base_init,
+       .fini = nvd0_disp_base_fini,
+};
+
+static struct nouveau_oclass
+nvd0_disp_base_oclass[] = {
+       { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
 };
 
 static struct nouveau_oclass
 nvd0_disp_sclass[] = {
-       {},
+       { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+       { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+       { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+       { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+       { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+       {}
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static u16
+exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
+           struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+           struct nvbios_outp *info)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       u16 mask, type, data;
+
+       if (outp < 4) {
+               type = DCB_OUTPUT_ANALOG;
+               mask = 0;
+       } else {
+               outp -= 4;
+               switch (ctrl & 0x00000f00) {
+               case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
+               case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
+               case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
+               case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
+               case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
+               case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
+               default:
+                       nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
+                       return 0x0000;
+               }
+               dcb->sorconf.link = mask;
+       }
+
+       mask  = 0x00c0 & (mask << 6);
+       mask |= 0x0001 << outp;
+       mask |= 0x0100 << head;
+
+       data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
+       if (!data)
+               return 0x0000;
+
+       return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
+}
+
+static bool
+exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data;
+
+       data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
+       if (data) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[id],
+                       .outp = &dcb,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               return nvbios_exec(&init) == 0;
+       }
+
+       return false;
+}
+
+static u32
+exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
+           u32 ctrl, int id, u32 pclk)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_outp info1;
+       struct nvbios_ocfg info2;
+       struct dcb_output dcb;
+       u8  ver, hdr, cnt, len;
+       u16 data, conf;
+
+       data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
+       if (data == 0x0000)
+               return 0x0000;
+
+       switch (dcb.type) {
+       case DCB_OUTPUT_TMDS:
+               conf = (ctrl & 0x00000f00) >> 8;
+               if (pclk >= 165000)
+                       conf |= 0x0100;
+               break;
+       case DCB_OUTPUT_LVDS:
+               conf = priv->sor.lvdsconf;
+               break;
+       case DCB_OUTPUT_DP:
+               conf = (ctrl & 0x00000f00) >> 8;
+               break;
+       case DCB_OUTPUT_ANALOG:
+       default:
+               conf = 0x00ff;
+               break;
+       }
+
+       data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
+       if (data) {
+               data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
+               if (data) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = data,
+                               .outp = &dcb,
+                               .crtc = head,
+                               .execute = 1,
+                       };
+
+                       if (nvbios_exec(&init))
+                               return 0x0000;
+                       return conf;
+               }
+       }
+
+       return 0x0000;
+}
+
+static void
+nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       int i;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+               if (mcc & (1 << head))
+                       exec_script(priv, head, i, mcc, 1);
+       }
+
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
 static void
-nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
+{
+       const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
+       const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
+       const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+       const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
+       const u32 hoff = (head * 0x800);
+       const u32 soff = (  or * 0x800);
+       const u32 loff = (link * 0x080) + soff;
+       const u32 symbol = 100000;
+       const u32 TU = 64;
+       u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
+       u32 clksor = nv_rd32(priv, 0x612300 + soff);
+       u32 datarate, link_nr, link_bw, bits;
+       u64 ratio, value;
+
+       if      ((conf & 0x3c0) == 0x180) bits = 30;
+       else if ((conf & 0x3c0) == 0x140) bits = 24;
+       else                              bits = 18;
+       datarate = (pclk * bits) / 8;
+
+       if      (dpctrl > 0x00030000) link_nr = 4;
+       else if (dpctrl > 0x00010000) link_nr = 2;
+       else                          link_nr = 1;
+
+       link_bw  = (clksor & 0x007c0000) >> 18;
+       link_bw *= 27000;
+
+       ratio  = datarate;
+       ratio *= symbol;
+       do_div(ratio, link_nr * link_bw);
+
+       value  = (symbol - ratio) * TU;
+       value *= ratio;
+       do_div(value, symbol);
+       do_div(value, symbol);
+
+       value += 5;
+       value |= 0x08000000;
+
+       nv_wr32(priv, 0x616610 + hoff, value);
+}
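
[Editor's note] The calculation above sizes the DP transfer unit: it takes the ratio of the pixel data rate to the aggregate link rate, scales it against a 64-symbol TU, and programs the result into 0x616610. A standalone sketch of the same arithmetic, with assumed example inputs standing in for the register reads (the numbers are ours):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t symbol = 100000, TU = 64;
	uint64_t pclk = 148500;		/* assumed pixel clock, kHz */
	uint64_t bits = 24;		/* assumed bits per pixel */
	uint64_t link_nr = 4;		/* assumed DP lane count */
	uint64_t link_bw = 10 * 27000;	/* assumed 2.7 Gb/s, 10 kHz units */

	uint64_t datarate = (pclk * bits) / 8;
	uint64_t ratio = datarate * symbol / (link_nr * link_bw);
	uint64_t value = (symbol - ratio) * TU * ratio / symbol / symbol;

	value += 5;
	printf("0x616610 word: 0x%08llx\n",
	       (unsigned long long)(value | 0x08000000));
	return 0;
}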
+
+static void
+nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       u32 pclk;
+       int i;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
+               if (mcc & (1 << head))
+                       exec_script(priv, head, i, mcc, 2);
+       }
+
+       pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+       nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
+       if (pclk && (mask & 0x00010000)) {
+               struct nouveau_clock *clk = nouveau_clock(priv);
+               clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+       }
+
+       nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
+               if (mcp & (1 << head)) {
+                       if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
+                               u32 addr, mask, data = 0x00000000;
+                               if (i < 4) {
+                                       addr = 0x612280 + ((i - 0) * 0x800);
+                                       mask = 0xffffffff;
+                               } else {
+                                       switch (mcp & 0x00000f00) {
+                                       case 0x00000800:
+                                       case 0x00000900:
+                                               nvd0_display_unk2_calc_tu(priv, head, i - 4);
+                                               break;
+                                       default:
+                                               break;
+                                       }
+
+                                       addr = 0x612300 + ((i - 4) * 0x800);
+                                       mask = 0x00000707;
+                                       if (cfg & 0x00000100)
+                                               data = 0x00000101;
+                               }
+                               nv_mask(priv, addr, mask, data);
+                       }
+                       break;
+               }
+       }
+
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
+{
+       int pclk, i;
+
+       pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
+
+       for (i = 0; mask && i < 8; i++) {
+               u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
+               if (mcp & (1 << head))
+                       exec_clkcmp(priv, head, i, mcp, 1, pclk);
+       }
+
+       nv_wr32(priv, 0x6101d4, 0x00000000);
+       nv_wr32(priv, 0x6109d4, 0x00000000);
+       nv_wr32(priv, 0x6101d0, 0x80000000);
+}
+
+static void
+nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
 {
        struct nouveau_bar *bar = nouveau_bar(priv);
        struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
                disp->vblank.notify(disp->vblank.data, crtc);
 }
 
-static void
+void
 nvd0_disp_intr(struct nouveau_subdev *subdev)
 {
-       struct nvd0_disp_priv *priv = (void *)subdev;
+       struct nv50_disp_priv *priv = (void *)subdev;
        u32 intr = nv_rd32(priv, 0x610088);
        int i;
 
-       for (i = 0; i < 4; i++) {
+       if (intr & 0x00000001) {
+               u32 stat = nv_rd32(priv, 0x61008c);
+               nv_wr32(priv, 0x61008c, stat);
+               intr &= ~0x00000001;
+       }
+
+       if (intr & 0x00000002) {
+               u32 stat = nv_rd32(priv, 0x61009c);
+               int chid = ffs(stat) - 1;
+               if (chid >= 0) {
+                       u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
+                       u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
+                       u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
+
+                       nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
+                                      "0x%08x 0x%08x\n",
+                                chid, (mthd & 0x0000ffc), data, mthd, unkn);
+                       nv_wr32(priv, 0x61009c, (1 << chid));
+                       nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
+               }
+
+               intr &= ~0x00000002;
+       }
+
+       if (intr & 0x00100000) {
+               u32 stat = nv_rd32(priv, 0x6100ac);
+               u32 mask = 0, crtc = ~0;
+
+               while (!mask && ++crtc < priv->head.nr)
+                       mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
+
+               if (stat & 0x00000001) {
+                       nv_wr32(priv, 0x6100ac, 0x00000001);
+                       nvd0_display_unk1_handler(priv, crtc, mask);
+                       stat &= ~0x00000001;
+               }
+
+               if (stat & 0x00000002) {
+                       nv_wr32(priv, 0x6100ac, 0x00000002);
+                       nvd0_display_unk2_handler(priv, crtc, mask);
+                       stat &= ~0x00000002;
+               }
+
+               if (stat & 0x00000004) {
+                       nv_wr32(priv, 0x6100ac, 0x00000004);
+                       nvd0_display_unk4_handler(priv, crtc, mask);
+                       stat &= ~0x00000004;
+               }
+
+               if (stat) {
+                       nv_info(priv, "unknown intr24 0x%08x\n", stat);
+                       nv_wr32(priv, 0x6100ac, stat);
+               }
+
+               intr &= ~0x00100000;
+       }
+
+       for (i = 0; i < priv->head.nr; i++) {
                u32 mask = 0x01000000 << i;
                if (mask & intr) {
                        u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
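
[Editor's note] For reference, the dispatch above implies the following PDISP interrupt layout; the decoder below is an inference from the handler, not from documentation:

#include <stdint.h>
#include <stdio.h>

/* Bit meanings inferred from nvd0_disp_intr(); illustration only. */
static void pdisp_intr_decode(uint32_t intr, int heads)
{
	int i;

	if (intr & 0x00000001)
		printf("channel exception summary (0x61008c)\n");
	if (intr & 0x00000002)
		printf("method exception, per-chid status in 0x61009c\n");
	if (intr & 0x00100000)
		printf("supervisor event, detail in 0x6100ac\n");
	for (i = 0; i < heads; i++)
		if (intr & (0x01000000u << i))
			printf("vblank, head %d\n", i);
}

int main(void)
{
	pdisp_intr_decode(0x01100002, 2);
	return 0;
}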
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
 
 static int
 nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                 struct nouveau_oclass *oclass, void *data, u32 size,
-                 struct nouveau_object **pobject)
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
 {
-       struct nvd0_disp_priv *priv;
+       struct nv50_disp_priv *priv;
        int ret;
 
        ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       nv_engine(priv)->sclass = nvd0_disp_sclass;
+       nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
        nv_subdev(priv)->intr = nvd0_disp_intr;
+       priv->sclass = nvd0_disp_sclass;
+       priv->head.nr = nv_rd32(priv, 0x022448);
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.dp_train = nvd0_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
 
        INIT_LIST_HEAD(&priv->base.vblank.list);
        spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
 struct nouveau_oclass
 nvd0_disp_oclass = {
-       .handle = NV_ENGINE(DISP, 0xd0),
+       .handle = NV_ENGINE(DISP, 0x90),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvd0_disp_ctor,
                .dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644 (file)
index 0000000..259537c
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+#include "nv50.h"
+
+static struct nouveau_oclass
+nve0_disp_sclass[] = {
+       { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
+       { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
+       { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
+       { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
+       { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+       {}
+};
+
+static struct nouveau_oclass
+nve0_disp_base_oclass[] = {
+       { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
+       {}
+};
+
+static int
+nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+              struct nouveau_oclass *oclass, void *data, u32 size,
+              struct nouveau_object **pobject)
+{
+       struct nv50_disp_priv *priv;
+       int ret;
+
+       ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+                                 "display", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nve0_disp_base_oclass;
+       nv_engine(priv)->cclass = &nv50_disp_cclass;
+       nv_subdev(priv)->intr = nvd0_disp_intr;
+       priv->sclass = nve0_disp_sclass;
+       priv->head.nr = nv_rd32(priv, 0x022448);
+       priv->dac.nr = 3;
+       priv->sor.nr = 4;
+       priv->dac.power = nv50_dac_power;
+       priv->dac.sense = nv50_dac_sense;
+       priv->sor.power = nv50_sor_power;
+       priv->sor.hda_eld = nvd0_hda_eld;
+       priv->sor.hdmi = nvd0_hdmi_ctrl;
+       priv->sor.dp_train = nvd0_sor_dp_train;
+       priv->sor.dp_train_init = nv94_sor_dp_train_init;
+       priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
+       priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
+       priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
+
+       INIT_LIST_HEAD(&priv->base.vblank.list);
+       spin_lock_init(&priv->base.vblank.lock);
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_disp_oclass = {
+       .handle = NV_ENGINE(DISP, 0x91),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_disp_ctor,
+               .dtor = _nouveau_disp_dtor,
+               .init = _nouveau_disp_init,
+               .fini = _nouveau_disp_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644 (file)
index 0000000..39b6b67
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/timer.h>
+
+#include "nv50.h"
+
+int
+nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+{
+       const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
+       const u32 soff = (or * 0x800);
+       nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+       nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
+       nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
+       nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
+       return 0;
+}
+
+int
+nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
+{
+       struct nv50_disp_priv *priv = (void *)object->engine;
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
+       const u8  head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
+       const u8  link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
+       const u8    or = (mthd & NV50_DISP_SOR_MTHD_OR);
+       const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
+       struct dcb_output outp;
+       u8  ver, hdr;
+       u32 data;
+       int ret = -EINVAL;
+
+       if (size < sizeof(u32))
+               return -EINVAL;
+       data = *(u32 *)args;
+
+       if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
+               return -ENODEV;
+
+       switch (mthd & ~0x3f) {
+       case NV50_DISP_SOR_PWR:
+               ret = priv->sor.power(priv, or, data);
+               break;
+       case NVA3_DISP_SOR_HDA_ELD:
+               ret = priv->sor.hda_eld(priv, or, args, size);
+               break;
+       case NV84_DISP_SOR_HDMI_PWR:
+               ret = priv->sor.hdmi(priv, head, or, data);
+               break;
+       case NV50_DISP_SOR_LVDS_SCRIPT:
+               priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
+               ret = 0;
+               break;
+       case NV94_DISP_SOR_DP_TRAIN:
+               switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
+               case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
+                       ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
+                       break;
+               case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
+                       ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
+                       break;
+               case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
+                       ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case NV94_DISP_SOR_DP_LNKCTL:
+               ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
+               break;
+       case NV94_DISP_SOR_DP_DRVCTL(0):
+       case NV94_DISP_SOR_DP_DRVCTL(1):
+       case NV94_DISP_SOR_DP_DRVCTL(2):
+       case NV94_DISP_SOR_DP_DRVCTL(3):
+               ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
+                                         type, mask, data, &outp);
+               break;
+       default:
+               BUG();
+       }
+
+       return ret;
+}
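
[Editor's note] The method offset itself encodes which SOR, link and head a request targets, and nv50_sor_mthd() folds those fields into the mask used to match a DCB output entry. A tiny sketch of that mask construction, with example values of our choosing:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mask expression in nv50_sor_mthd(). */
static uint16_t sor_dcb_mask(int head, int link, int or)
{
	return (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
}

int main(void)
{
	/* head 1, link 0, SOR 2 -> 0x0244 */
	printf("mask = 0x%04x\n", sor_dcb_mask(1, 0, 2));
	return 0;
}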
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644 (file)
index 0000000..f6edd00
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+       static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
+       static const u8 nv94[] = { 16, 8, 0, 24 };
+       if (nv_device(priv)->chipset == 0xaf)
+               return nvaf[lane];
+       return nv94[lane];
+}
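
[Editor's note] Dividing a lane's register shift by eight recovers its physical lane index, which is how nv94_sor_dp_lnkctl() below builds the lane-enable mask. A standalone rework of that loop, assuming the nv94 table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t nv94[] = { 16, 8, 0, 24 };
	uint32_t lanes = 0;
	int link_nr = 2, i;	/* assumed 2-lane link */

	for (i = 0; i < link_nr; i++)
		lanes |= 1 << (nv94[i] >> 3);
	printf("lane mask: 0x%x\n", lanes);	/* 0x6 */
	return 0;
}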
+
+int
+nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
+                      u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_dpout info;
+       u8  ver, hdr, cnt, len;
+       u16 outp;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
+                       init.offset = info.script[2];
+               else
+                       init.offset = info.script[3];
+               nvbios_exec(&init);
+
+               init.offset = info.script[0];
+               nvbios_exec(&init);
+       }
+
+       return 0;
+}
+
+int
+nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
+                      u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       struct nvbios_dpout info;
+       u8  ver, hdr, cnt, len;
+       u16 outp;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = info.script[1],
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               nvbios_exec(&init);
+       }
+
+       return 0;
+}
+
+int
+nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+                 u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+       nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
+       return 0;
+}
+
+int
+nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 soff = (or * 0x800);
+       u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+       u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+       u32 outp, lane = 0;
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout info;
+       int i;
+
+       /* -> 10 kHz units */
+       link_bw *= 2700;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp && info.lnkcmp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = 0x0000,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               while (link_bw < nv_ro16(bios, info.lnkcmp))
+                       info.lnkcmp += 4;
+               init.offset = nv_ro16(bios, info.lnkcmp + 2);
+
+               nvbios_exec(&init);
+       }
+
+       dpctrl |= ((1 << link_nr) - 1) << 16;
+       if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+               dpctrl |= 0x00004000;
+       if (link_bw > 16200)
+               clksor |= 0x00040000;
+
+       for (i = 0; i < link_nr; i++)
+               lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
+
+       nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
+       nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+       nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+       return 0;
+}
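
[Editor's note] The lnkcmp walk above treats the table as { max link bandwidth in 10 kHz units (u16), init-script offset (u16) } pairs ordered fastest-first, skipping entries until one fits the requested rate; that layout is our inference from the reads. A sketch with made-up offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed layout; script offsets are invented for illustration */
	const uint16_t lnkcmp[][2] = {
		{ 27000, 0x1234 },	/* up to 2.70 Gb/s */
		{ 16200, 0x1250 },	/* up to 1.62 Gb/s */
	};
	uint16_t link_bw = 0x06 * 2700;	/* DPCD 1.62 Gb/s -> 16200 */
	int i = 0;

	while (link_bw < lnkcmp[i][0])
		i++;
	printf("script offset 0x%04x\n", lnkcmp[i][1]);
	return 0;
}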
+
+int
+nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+       const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+       u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout outp;
+       struct nvbios_dpcfg ocfg;
+
+       addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+       if (!addr)
+               return -ENODEV;
+
+       addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+       if (!addr)
+               return -EINVAL;
+
+       nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+       nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+       nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644 (file)
index 0000000..c37ce7e
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+
+#include "nv50.h"
+
+static inline u32
+nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
+{
+       static const u8 nvd0[] = { 16, 8, 0, 24 };
+       return nvd0[lane];
+}
+
+int
+nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
+                 u16 type, u16 mask, u32 data, struct dcb_output *info)
+{
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
+       nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
+       return 0;
+}
+
+int
+nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u32 soff = (or * 0x800);
+       const u8  link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
+       const u8  link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+       u32 outp, lane = 0;
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout info;
+       int i;
+
+       outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
+       if (outp && info.lnkcmp) {
+               struct nvbios_init init = {
+                       .subdev = nv_subdev(priv),
+                       .bios = bios,
+                       .offset = 0x0000,
+                       .outp = dcbo,
+                       .crtc = head,
+                       .execute = 1,
+               };
+
+               while (nv_ro08(bios, info.lnkcmp) < link_bw)
+                       info.lnkcmp += 3;
+               init.offset = nv_ro16(bios, info.lnkcmp + 1);
+
+               nvbios_exec(&init);
+       }
+
+       clksor |= link_bw << 18;
+       dpctrl |= ((1 << link_nr) - 1) << 16;
+       if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
+               dpctrl |= 0x00004000;
+
+       for (i = 0; i < link_nr; i++)
+               lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
+
+       nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
+       nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
+       nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
+       return 0;
+}
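
[Editor's note] Note the contrast with nv94: there the DPCD link-bandwidth byte is first converted to 10 kHz units, while here it is programmed directly into the clock-source field at bits 18 and up. A sketch, assuming the standard DPCD encodings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t link_bw = 0x0a;	/* DPCD encoding for 2.70 Gb/s per lane */
	uint32_t clksor = (uint32_t)link_bw << 18;

	printf("clksor field 0x%08x (%u Mb/s per lane)\n",
	       clksor, link_bw * 270);
	return 0;
}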
+
+int
+nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
+                  u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
+{
+       struct nouveau_bios *bios = nouveau_bios(priv);
+       const u32 loff = (or * 0x800) + (link * 0x80);
+       const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
+       const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
+       u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
+       u8  ver, hdr, cnt, len;
+       struct nvbios_dpout outp;
+       struct nvbios_dpcfg ocfg;
+
+       addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
+       if (!addr)
+               return -ENODEV;
+
+       addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
+       if (!addr)
+               return -EINVAL;
+
+       nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
+       nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
+       nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
+       nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); /* rmw of nothing, presumably to post the writes above */
+       return 0;
+}
index e1f013d..5103e88 100644 (file)
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
 
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
-                      struct nouveau_object *engine,
-                      struct nouveau_oclass *oclass,
-                      void *data, u32 size, int len, void **pobject)
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
+                   struct nouveau_object *engine,
+                   struct nouveau_oclass *oclass, void *data, u32 size,
+                   struct nouveau_object **pobject)
 {
+       struct nouveau_dmaeng *dmaeng = (void *)engine;
+       struct nouveau_dmaobj *dmaobj;
+       struct nouveau_gpuobj *gpuobj;
        struct nv_dma_class *args = data;
-       struct nouveau_dmaobj *object;
        int ret;
 
        if (size < sizeof(*args))
                return -EINVAL;
 
-       ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
-       object = *pobject;
+       ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+       *pobject = nv_object(dmaobj);
        if (ret)
                return ret;
 
        switch (args->flags & NV_DMA_TARGET_MASK) {
        case NV_DMA_TARGET_VM:
-               object->target = NV_MEM_TARGET_VM;
+               dmaobj->target = NV_MEM_TARGET_VM;
                break;
        case NV_DMA_TARGET_VRAM:
-               object->target = NV_MEM_TARGET_VRAM;
+               dmaobj->target = NV_MEM_TARGET_VRAM;
                break;
        case NV_DMA_TARGET_PCI:
-               object->target = NV_MEM_TARGET_PCI;
+               dmaobj->target = NV_MEM_TARGET_PCI;
                break;
        case NV_DMA_TARGET_PCI_US:
        case NV_DMA_TARGET_AGP:
-               object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+               dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
                break;
        default:
                return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
 
        switch (args->flags & NV_DMA_ACCESS_MASK) {
        case NV_DMA_ACCESS_VM:
-               object->access = NV_MEM_ACCESS_VM;
+               dmaobj->access = NV_MEM_ACCESS_VM;
                break;
        case NV_DMA_ACCESS_RD:
-               object->access = NV_MEM_ACCESS_RO;
+               dmaobj->access = NV_MEM_ACCESS_RO;
                break;
        case NV_DMA_ACCESS_WR:
-               object->access = NV_MEM_ACCESS_WO;
+               dmaobj->access = NV_MEM_ACCESS_WO;
                break;
        case NV_DMA_ACCESS_RDWR:
-               object->access = NV_MEM_ACCESS_RW;
+               dmaobj->access = NV_MEM_ACCESS_RW;
                break;
        default:
                return -EINVAL;
        }
 
-       object->start = args->start;
-       object->limit = args->limit;
-       return 0;
+       dmaobj->start = args->start;
+       dmaobj->limit = args->limit;
+       dmaobj->conf0 = args->conf0;
+
+       switch (nv_mclass(parent)) {
+       case NV_DEVICE_CLASS:
+               /* delayed, or no, binding */
+               break;
+       default:
+               ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+               if (ret == 0) {
+                       nouveau_object_ref(NULL, pobject);
+                       *pobject = nv_object(gpuobj);
+               }
+               break;
+       }
+
+       return ret;
 }
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+       .ctor = nouveau_dmaobj_ctor,
+       .dtor = nouveau_object_destroy,
+       .init = nouveau_object_init,
+       .fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+       { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+       {}
+};
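
[Editor's note] The common constructor now makes the bind decision that each chipset's ctor used to duplicate: a dmaobj whose parent is the device stays a software object (binding deferred until an engine asks), anything else is bound to a gpuobj immediately. A minimal sketch of that predicate; the class value is assumed here for illustration:

#include <stdbool.h>
#include <stdint.h>

#define NV_DEVICE_CLASS 0x00000080	/* assumed value, illustration only */

/* Mirrors the switch at the end of nouveau_dmaobj_ctor(). */
static bool dmaobj_bind_immediately(uint32_t parent_class)
{
	return parent_class != NV_DEVICE_CLASS;
}

int main(void)
{
	return dmaobj_bind_immediately(NV_DEVICE_CLASS) ? 1 : 0;
}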
index 9f4cc2f..027d821 100644 (file)
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nv04_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
 nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
                 struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
        u32 length = dmaobj->limit - dmaobj->start;
        int ret;
 
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NV03_CHANNEL_DMA_CLASS:
+               case NV10_CHANNEL_DMA_CLASS:
+               case NV17_CHANNEL_DMA_CLASS:
+               case NV40_CHANNEL_DMA_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
        if (dmaobj->target == NV_MEM_TARGET_VM) {
                if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
                        struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 }
 
 static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
-{
-       struct nouveau_dmaeng *dmaeng = (void *)engine;
-       struct nv04_dmaobj_priv *dmaobj;
-       struct nouveau_gpuobj *gpuobj;
-       int ret;
-
-       ret = nouveau_dmaobj_create(parent, engine, oclass,
-                                   data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
-
-       switch (nv_mclass(parent)) {
-       case NV_DEVICE_CLASS:
-               break;
-       case NV03_CHANNEL_DMA_CLASS:
-       case NV10_CHANNEL_DMA_CLASS:
-       case NV17_CHANNEL_DMA_CLASS:
-       case NV40_CHANNEL_DMA_CLASS:
-               ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-               nouveau_object_ref(NULL, pobject);
-               *pobject = nv_object(gpuobj);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
-       .ctor = nv04_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
-       { 0x0002, &nv04_dmaobj_ofuncs },
-       { 0x0003, &nv04_dmaobj_ofuncs },
-       { 0x003d, &nv04_dmaobj_ofuncs },
-       {}
-};
-
-static int
 nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                 struct nouveau_oclass *oclass, void *data, u32 size,
                 struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nv04_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
        priv->base.bind = nv04_dmaobj_bind;
        return 0;
 }
index 045d256..750183f 100644 (file)
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nv50_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
 nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
                 struct nouveau_object *parent,
                 struct nouveau_dmaobj *dmaobj,
                 struct nouveau_gpuobj **pgpuobj)
 {
-       u32 flags = nv_mclass(dmaobj);
+       u32 flags0 = nv_mclass(dmaobj);
+       u32 flags5 = 0x00000000;
        int ret;
 
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NV50_CHANNEL_DMA_CLASS:
+               case NV84_CHANNEL_DMA_CLASS:
+               case NV50_CHANNEL_IND_CLASS:
+               case NV84_CHANNEL_IND_CLASS:
+               case NV50_DISP_MAST_CLASS:
+               case NV84_DISP_MAST_CLASS:
+               case NV94_DISP_MAST_CLASS:
+               case NVA0_DISP_MAST_CLASS:
+               case NVA3_DISP_MAST_CLASS:
+               case NV50_DISP_SYNC_CLASS:
+               case NV84_DISP_SYNC_CLASS:
+               case NV94_DISP_SYNC_CLASS:
+               case NVA0_DISP_SYNC_CLASS:
+               case NVA3_DISP_SYNC_CLASS:
+               case NV50_DISP_OVLY_CLASS:
+               case NV84_DISP_OVLY_CLASS:
+               case NV94_DISP_OVLY_CLASS:
+               case NVA0_DISP_OVLY_CLASS:
+               case NVA3_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0  = NV50_DMA_CONF0_PRIV_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+               } else {
+                       dmaobj->conf0  = NV50_DMA_CONF0_PRIV_US;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+                       dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+               }
+       }
+
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+       flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+       flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
        switch (dmaobj->target) {
        case NV_MEM_TARGET_VM:
-               flags |= 0x00000000;
-               flags |= 0x60000000; /* COMPRESSION_USEVM */
-               flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+               flags0 |= 0x00000000;
                break;
        case NV_MEM_TARGET_VRAM:
-               flags |= 0x00010000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
-               flags |= 0x00020000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
-               flags |= 0x00030000;
-               flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+               flags0 |= 0x00030000;
                break;
        default:
                return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
        case NV_MEM_ACCESS_VM:
                break;
        case NV_MEM_ACCESS_RO:
-               flags |= 0x00040000;
+               flags0 |= 0x00040000;
                break;
        case NV_MEM_ACCESS_WO:
        case NV_MEM_ACCESS_RW:
-               flags |= 0x00080000;
+               flags0 |= 0x00080000;
                break;
        }
 
        ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
        if (ret == 0) {
-               nv_wo32(*pgpuobj, 0x00, flags);
+               nv_wo32(*pgpuobj, 0x00, flags0);
                nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
                nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
                nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
                                        upper_32_bits(dmaobj->start));
                nv_wo32(*pgpuobj, 0x10, 0x00000000);
-               nv_wo32(*pgpuobj, 0x14, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, flags5);
        }
 
        return ret;
 }
 
 static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
-{
-       struct nouveau_dmaeng *dmaeng = (void *)engine;
-       struct nv50_dmaobj_priv *dmaobj;
-       struct nouveau_gpuobj *gpuobj;
-       int ret;
-
-       ret = nouveau_dmaobj_create(parent, engine, oclass,
-                                   data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
-
-       switch (nv_mclass(parent)) {
-       case NV_DEVICE_CLASS:
-               break;
-       case NV50_CHANNEL_DMA_CLASS:
-       case NV84_CHANNEL_DMA_CLASS:
-       case NV50_CHANNEL_IND_CLASS:
-       case NV84_CHANNEL_IND_CLASS:
-               ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-               nouveau_object_ref(NULL, pobject);
-               *pobject = nv_object(gpuobj);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
-       .ctor = nv50_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
-       { 0x0002, &nv50_dmaobj_ofuncs },
-       { 0x0003, &nv50_dmaobj_ofuncs },
-       { 0x003d, &nv50_dmaobj_ofuncs },
-       {}
-};
-
-static int
 nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                 struct nouveau_oclass *oclass, void *data, u32 size,
                 struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nv50_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
        priv->base.bind = nv50_dmaobj_bind;
        return 0;
 }
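
On nv50 the bind() hook now derives the object's first and sixth words from a
caller-supplied conf0, defaulting it when NV50_DMA_CONF0_ENABLE is clear. The
shifts line up with the magic numbers deleted above: COMP and TYPE move up by 22
bits, landing exactly on the old hardcoded 0x60000000 (COMPRESSION_USEVM) and
0x1fc00000 (STORAGE_TYPE_USEVM) fields. A sketch with illustrative mask values
(the real NV50_DMA_CONF0_* constants live in core/class.h and may differ):

    #include <stdint.h>

    /* Hypothetical masks, chosen so the shifts reproduce the old fields. */
    #define CONF0_COMP 0x00000180u   /* << 22 -> 0x60000000 */
    #define CONF0_TYPE 0x0000007fu   /* << 22 -> 0x1fc00000 */
    #define CONF0_PRIV 0x00300000u
    #define CONF0_PART 0x00030000u

    static void pack_nv50(uint32_t oclass, uint32_t conf0,
                          uint32_t *flags0, uint32_t *flags5)
    {
            *flags0  = oclass;                     /* word 0 starts as the class */
            *flags0 |= (conf0 & CONF0_COMP) << 22; /* compression mode */
            *flags0 |= (conf0 & CONF0_TYPE) << 22; /* storage type */
            *flags0 |=  conf0 & CONF0_PRIV;
            *flags5  =  conf0 & CONF0_PART;        /* word 5 (offset 0x14) */
    }
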
index 5baa086..cd3970d 100644 (file)
@@ -22,7 +22,9 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/device.h>
 #include <core/gpuobj.h>
+#include <core/class.h>
 
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
        struct nouveau_dmaeng base;
 };
 
-struct nvc0_dmaobj_priv {
-       struct nouveau_dmaobj base;
-};
-
 static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-                struct nouveau_oclass *oclass, void *data, u32 size,
-                struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+                struct nouveau_object *parent,
+                struct nouveau_dmaobj *dmaobj,
+                struct nouveau_gpuobj **pgpuobj)
 {
-       struct nvc0_dmaobj_priv *dmaobj;
+       u32 flags0 = nv_mclass(dmaobj);
+       u32 flags5 = 0x00000000;
        int ret;
 
-       ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
-       *pobject = nv_object(dmaobj);
-       if (ret)
-               return ret;
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NVA3_DISP_MAST_CLASS:
+               case NVA3_DISP_SYNC_CLASS:
+               case NVA3_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else
+               return 0;
+
+       if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_VM;
+                       dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+               } else {
+                       dmaobj->conf0  = NVC0_DMA_CONF0_PRIV_US;
+                       dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+                       dmaobj->conf0 |= 0x00020000;
+               }
+       }
 
-       if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+       flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+       flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+       flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+       switch (dmaobj->target) {
+       case NV_MEM_TARGET_VM:
+               flags0 |= 0x00000000;
+               break;
+       case NV_MEM_TARGET_VRAM:
+               flags0 |= 0x00010000;
+               break;
+       case NV_MEM_TARGET_PCI:
+               flags0 |= 0x00020000;
+               break;
+       case NV_MEM_TARGET_PCI_NOSNOOP:
+               flags0 |= 0x00030000;
+               break;
+       default:
                return -EINVAL;
+       }
 
-       return 0;
-}
+       switch (dmaobj->access) {
+       case NV_MEM_ACCESS_VM:
+               break;
+       case NV_MEM_ACCESS_RO:
+               flags0 |= 0x00040000;
+               break;
+       case NV_MEM_ACCESS_WO:
+       case NV_MEM_ACCESS_RW:
+               flags0 |= 0x00080000;
+               break;
+       }
 
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
-       .ctor = nvc0_dmaobj_ctor,
-       .dtor = _nouveau_dmaobj_dtor,
-       .init = _nouveau_dmaobj_init,
-       .fini = _nouveau_dmaobj_fini,
-};
+       ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       if (ret == 0) {
+               nv_wo32(*pgpuobj, 0x00, flags0);
+               nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+               nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+               nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+                                       upper_32_bits(dmaobj->start));
+               nv_wo32(*pgpuobj, 0x10, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, flags5);
+       }
 
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
-       { 0x0002, &nvc0_dmaobj_ofuncs },
-       { 0x0003, &nvc0_dmaobj_ofuncs },
-       { 0x003d, &nvc0_dmaobj_ofuncs },
-       {}
-};
+       return ret;
+}
 
 static int
 nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       priv->base.base.sclass = nvc0_dmaobj_sclass;
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+       priv->base.bind = nvc0_dmaobj_bind;
        return 0;
 }
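
Note the Fermi asymmetry: nvc0's bind() builds a real object only for the NVA3
display parents; for engine-context parents it returns 0 without touching
*pgpuobj, so binding there is effectively a deferred no-op, and any other parent
is rejected. The guard, as a sketch with simplified parent classification:

    /* Sketch of the Fermi bind guard; not the driver's class checks. */
    enum parent_kind { ENGCTX, DISP_CLASS, OTHER };

    static int nvc0_bind_guard_sketch(enum parent_kind parent, int *built)
    {
            *built = 0;
            if (parent == ENGCTX)
                    return 0;        /* engine ctx: nothing to build */
            if (parent != DISP_CLASS)
                    return -22;      /* -EINVAL: unsupported parent */
            *built = 1;              /* real code allocates the gpuobj here */
            return 0;
    }
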
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644 (file)
index 0000000..d152875
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/device.h>
+#include <core/gpuobj.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvd0_dmaeng_priv {
+       struct nouveau_dmaeng base;
+};
+
+static int
+nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+                struct nouveau_object *parent,
+                struct nouveau_dmaobj *dmaobj,
+                struct nouveau_gpuobj **pgpuobj)
+{
+       u32 flags0 = 0x00000000;
+       int ret;
+
+       if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+               switch (nv_mclass(parent->parent)) {
+               case NVD0_DISP_MAST_CLASS:
+               case NVD0_DISP_SYNC_CLASS:
+               case NVD0_DISP_OVLY_CLASS:
+               case NVE0_DISP_MAST_CLASS:
+               case NVE0_DISP_SYNC_CLASS:
+               case NVE0_DISP_OVLY_CLASS:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       } else
+               return 0;
+
+       if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
+               if (dmaobj->target == NV_MEM_TARGET_VM) {
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
+               } else {
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
+                       dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
+               }
+       }
+
+       flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
+       flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
+
+       switch (dmaobj->target) {
+       case NV_MEM_TARGET_VRAM:
+               flags0 |= 0x00000009;
+               break;
+       default:
+               return -EINVAL;
+               break;
+       }
+
+       ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+       if (ret == 0) {
+               nv_wo32(*pgpuobj, 0x00, flags0);
+               nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
+               nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+               nv_wo32(*pgpuobj, 0x0c, 0x00000000);
+               nv_wo32(*pgpuobj, 0x10, 0x00000000);
+               nv_wo32(*pgpuobj, 0x14, 0x00000000);
+       }
+
+       return ret;
+}
+
+static int
+nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+                struct nouveau_oclass *oclass, void *data, u32 size,
+                struct nouveau_object **pobject)
+{
+       struct nvd0_dmaeng_priv *priv;
+       int ret;
+
+       ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+       priv->base.bind = nvd0_dmaobj_bind;
+       return 0;
+}
+
+struct nouveau_oclass
+nvd0_dmaeng_oclass = {
+       .handle = NV_ENGINE(DMAOBJ, 0xd0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvd0_dmaeng_ctor,
+               .dtor = _nouveau_dmaeng_dtor,
+               .init = _nouveau_dmaeng_init,
+               .fini = _nouveau_dmaeng_fini,
+       },
+};
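
Unlike nv50, which stores byte-granular start/limit split across words 1-3, the
new nvd0 bind writes both addresses right-shifted by 8, i.e. in 256-byte units,
into words 1 and 2. A worked packing of a made-up VRAM span under that layout
(the conf0-derived TYPE/PAGE bits are omitted from flags0 here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t start = 0x40000000ull;        /* example values only */
            uint64_t limit = 0x4fffffffull;
            uint32_t word[6] = {
                    0x00000009,                    /* flags0: VRAM target */
                    (uint32_t)(start >> 8),        /* word 1: start >> 8 */
                    (uint32_t)(limit >> 8),        /* word 2: limit >> 8 */
                    0, 0, 0,                       /* words 3-5 unused */
            };
            for (int i = 0; i < 6; i++)
                    printf("0x%02x: 0x%08x\n", i * 4, word[i]);
            return 0;
    }
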
index bbb43c6..c2b9db3 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <core/object.h>
 #include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
                             struct nouveau_object *engine,
                             struct nouveau_oclass *oclass,
                             int bar, u32 addr, u32 size, u32 pushbuf,
-                            u32 engmask, int len, void **ptr)
+                            u64 engmask, int len, void **ptr)
 {
        struct nouveau_device *device = nv_device(engine);
        struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 
        dmaeng = (void *)chan->pushdma->base.engine;
        switch (chan->pushdma->base.oclass->handle) {
-       case 0x0002:
-       case 0x003d:
+       case NV_DMA_FROM_MEMORY_CLASS:
+       case NV_DMA_IN_MEMORY_CLASS:
                break;
        default:
                return -EINVAL;
        }
 
-       if (dmaeng->bind) {
-               ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
-               if (ret)
-                       return ret;
-       }
+       ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+       if (ret)
+               return ret;
 
        /* find a free fifo channel */
        spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
 }
 
 u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_fifo_chan *chan = (void *)object;
        return ioread32_native(chan->user + addr);
 }
 
 void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_fifo_chan *chan = (void *)object;
        iowrite32_native(data, chan->user + addr);
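
The engmask parameter (and the channel rd32/wr32 address above) widens from u32
to u64, and the follow-on churn in every chan_ctor below is not cosmetic: with a
32-bit constant, `1 << n` is undefined once an engine index reaches 32, so each
caller switches to 1ULL. A two-line demonstration, runnable standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int engine = 33;                 /* hypothetical index past bit 31 */
            uint64_t mask = 1ULL << engine;  /* well-defined 64-bit shift;
                                              * "1 << 33" would be undefined */
            printf("engine %d -> mask 0x%016llx\n", engine,
                   (unsigned long long)mask);
            return 0;
    }
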
index ea76e3e..a47a854 100644 (file)
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                        }
 
                        if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-                               nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+                               nv_error(priv, "CACHE_ERROR - Ch %d/%d "
                                              "Mthd 0x%04x Data 0x%08x\n",
                                        chid, (mthd >> 13) & 7, mthd & 0x1ffc,
                                        data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                                u32 ib_get = nv_rd32(priv, 0x003334);
                                u32 ib_put = nv_rd32(priv, 0x003330);
 
-                               nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+                               nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
                                     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
                                     "State 0x%08x (err: %s) Push 0x%08x\n",
                                        chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                                        nv_wr32(priv, 0x003334, ib_put);
                                }
                        } else {
-                               nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+                               nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
                                             "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
                                        chid, dma_get, dma_put, state,
                                        nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 
                if (device->card_type == NV_50) {
                        if (status & 0x00000010) {
-                               nv50_fb_trap(nouveau_fb(priv), 1);
                                status &= ~0x00000010;
                                nv_wr32(priv, 0x002100, 0x00000010);
                        }
                }
 
                if (status) {
-                       nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+                       nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
                                status, chid);
                        nv_wr32(priv, NV03_PFIFO_INTR_0, status);
                        status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
        }
 
        if (status) {
-               nv_info(priv, "still angry after %d spins, halt\n", cnt);
+               nv_error(priv, "still angry after %d spins, halt\n", cnt);
                nv_wr32(priv, 0x002140, 0);
                nv_wr32(priv, 0x000140, 0);
        }
index 4ba7542..2c927c1 100644 (file)
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index b96e6b0..a9cb51d 100644 (file)
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
                                          0x10000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
                                          &chan);
        *pobject = nv_object(chan);
        if (ret)
index 559c3b4..2b1f917 100644 (file)
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x1000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index 536e763..bd09636 100644 (file)
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wo32(base->eng, addr + 0x00, 0x00000000);
-       nv_wo32(base->eng, addr + 0x04, 0x00000000);
-       nv_wo32(base->eng, addr + 0x08, 0x00000000);
-       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-       nv_wo32(base->eng, addr + 0x10, 0x00000000);
-       nv_wo32(base->eng, addr + 0x14, 0x00000000);
-       bar->flush(bar);
-
        /* HW bug workaround:
         *
         * PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                if (suspend)
                        ret = -EBUSY;
        }
-
        nv_wr32(priv, 0x00b860, me);
+
+       if (ret == 0) {
+               nv_wo32(base->eng, addr + 0x00, 0x00000000);
+               nv_wo32(base->eng, addr + 0x04, 0x00000000);
+               nv_wo32(base->eng, addr + 0x08, 0x00000000);
+               nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+               nv_wo32(base->eng, addr + 0x10, 0x00000000);
+               nv_wo32(base->eng, addr + 0x14, 0x00000000);
+               bar->flush(bar);
+       }
+
        return ret;
 }
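
This detach hunk, and the matching nv84/nvc0/nve0 hunks further down, all make
the same ordering fix: the engine-context words are zeroed only after the channel
kick has completed, instead of before the wait, so a wedged engine is never left
running against state that was already wiped. The shape of the change, sketched:

    /* Before: zero context, then wait.  After: wait, zero only on success. */
    static int detach_sketch(int (*kick_and_wait)(void),
                             void (*zero_ctx)(void), int suspend)
    {
            int ret = 0;
            if (kick_and_wait() != 0) {    /* engine failed to idle */
                    if (suspend)
                            ret = -16;     /* -EBUSY */
            }
            if (ret == 0)
                    zero_ctx();            /* safe: engine is quiescent */
            return ret;
    }
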
 
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index b4fd26d..1eb1c51 100644 (file)
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wo32(base->eng, addr + 0x00, 0x00000000);
-       nv_wo32(base->eng, addr + 0x04, 0x00000000);
-       nv_wo32(base->eng, addr + 0x08, 0x00000000);
-       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-       nv_wo32(base->eng, addr + 0x10, 0x00000000);
-       nv_wo32(base->eng, addr + 0x14, 0x00000000);
-       bar->flush(bar);
-
        save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
        nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
        done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                if (suspend)
                        return -EBUSY;
        }
+
+       nv_wo32(base->eng, addr + 0x00, 0x00000000);
+       nv_wo32(base->eng, addr + 0x04, 0x00000000);
+       nv_wo32(base->eng, addr + 0x08, 0x00000000);
+       nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+       nv_wo32(base->eng, addr + 0x10, 0x00000000);
+       nv_wo32(base->eng, addr + 0x14, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG) |
-                                         (1 << NVDEV_ENGINE_ME) |
-                                         (1 << NVDEV_ENGINE_VP) |
-                                         (1 << NVDEV_ENGINE_CRYPT) |
-                                         (1 << NVDEV_ENGINE_BSP) |
-                                         (1 << NVDEV_ENGINE_PPP) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_UNK1C1), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG) |
+                                         (1ULL << NVDEV_ENGINE_ME) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_CRYPT) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_PPP) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
                                          0x2000, args->pushbuf,
-                                         (1 << NVDEV_ENGINE_DMAOBJ) |
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_MPEG) |
-                                         (1 << NVDEV_ENGINE_ME) |
-                                         (1 << NVDEV_ENGINE_VP) |
-                                         (1 << NVDEV_ENGINE_CRYPT) |
-                                         (1 << NVDEV_ENGINE_BSP) |
-                                         (1 << NVDEV_ENGINE_PPP) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_UNK1C1), &chan);
+                                         (1ULL << NVDEV_ENGINE_DMAOBJ) |
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_MPEG) |
+                                         (1ULL << NVDEV_ENGINE_ME) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_CRYPT) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_PPP) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
index 6f21be6..b4365dd 100644 (file)
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
        case NVDEV_ENGINE_GR   : addr = 0x0210; break;
        case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
        case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
 
-       nv_wo32(base, addr + 0x00, 0x00000000);
-       nv_wo32(base, addr + 0x04, 0x00000000);
-       bar->flush(bar);
-
        nv_wr32(priv, 0x002634, chan->base.chid);
        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                        return -EBUSY;
        }
 
+       nv_wo32(base, addr + 0x00, 0x00000000);
+       nv_wo32(base, addr + 0x04, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
        ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
                                          priv->user.bar.offset, 0x1000,
                                          args->pushbuf,
-                                         (1 << NVDEV_ENGINE_SW) |
-                                         (1 << NVDEV_ENGINE_GR) |
-                                         (1 << NVDEV_ENGINE_COPY0) |
-                                         (1 << NVDEV_ENGINE_COPY1), &chan);
+                                         (1ULL << NVDEV_ENGINE_SW) |
+                                         (1ULL << NVDEV_ENGINE_GR) |
+                                         (1ULL << NVDEV_ENGINE_COPY0) |
+                                         (1ULL << NVDEV_ENGINE_COPY1) |
+                                         (1ULL << NVDEV_ENGINE_BSP) |
+                                         (1ULL << NVDEV_ENGINE_VP) |
+                                         (1ULL << NVDEV_ENGINE_PPP), &chan);
        *pobject = nv_object(chan);
        if (ret)
                return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
        u32 stat = nv_rd32(priv, 0x002100) & mask;
 
        if (stat & 0x00000100) {
-               nv_info(priv, "unknown status 0x00000100\n");
+               nv_warn(priv, "unknown status 0x00000100\n");
                nv_wr32(priv, 0x002100, 0x00000100);
                stat &= ~0x00000100;
        }
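
nvc0 (and nve0 below) gain context-pointer slots for the video engines; each
engine owns a fixed offset inside the channel's instance block. The mapping,
mirrored from the nvc0 switch as a sketch (engine numbering here is illustrative,
not the NVDEV_ENGINE_* values):

    enum eng { GR, COPY0, COPY1, VP, PPP, BSP };

    static int ctx_offset(enum eng e)
    {
            switch (e) {
            case GR:    return 0x0210;
            case COPY0: return 0x0230;
            case COPY1: return 0x0240;
            case VP:    return 0x0250;
            case PPP:   return 0x0260;
            case BSP:   return 0x0270;
            }
            return -22;     /* -EINVAL: engine has no context slot */
    }
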
index 36e81b6..c930da9 100644 (file)
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
 
-#define _(a,b) { (a), ((1 << (a)) | (b)) }
+#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
 static const struct {
-       int subdev;
-       u32 mask;
+       u64 subdev;
+       u64 mask;
 } fifo_engine[] = {
-       _(NVDEV_ENGINE_GR      , (1 << NVDEV_ENGINE_SW)),
+       _(NVDEV_ENGINE_GR      , (1ULL << NVDEV_ENGINE_SW)),
        _(NVDEV_ENGINE_VP      , 0),
        _(NVDEV_ENGINE_PPP     , 0),
        _(NVDEV_ENGINE_BSP     , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
        case NVDEV_ENGINE_GR   :
        case NVDEV_ENGINE_COPY0:
        case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
        case NVDEV_ENGINE_GR   :
        case NVDEV_ENGINE_COPY0:
        case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+       case NVDEV_ENGINE_BSP  : addr = 0x0270; break;
+       case NVDEV_ENGINE_VP   : addr = 0x0250; break;
+       case NVDEV_ENGINE_PPP  : addr = 0x0260; break;
        default:
                return -EINVAL;
        }
 
-       nv_wo32(base, addr + 0x00, 0x00000000);
-       nv_wo32(base, addr + 0x04, 0x00000000);
-       bar->flush(bar);
-
        nv_wr32(priv, 0x002634, chan->base.chid);
        if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
                nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
                        return -EBUSY;
        }
 
+       nv_wo32(base, addr + 0x00, 0x00000000);
+       nv_wo32(base, addr + 0x04, 0x00000000);
+       bar->flush(bar);
        return 0;
 }
 
index 6185282..e30a9c5 100644 (file)
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv03_graph_gdi_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_patt },
-       { 0x0188, nv04_graph_mthd_bind_rop },
-       { 0x018c, nv04_graph_mthd_bind_beta1 },
-       { 0x0190, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_gdi_omthds[] = {
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_blit_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv01_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_surf_dst },
-       { 0x019c, nv04_graph_mthd_bind_surf_src },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_blit_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_beta4 },
-       { 0x019c, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_iifc_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_chroma },
-       { 0x018c, nv01_graph_mthd_bind_clip },
-       { 0x0190, nv04_graph_mthd_bind_patt },
-       { 0x0194, nv04_graph_mthd_bind_rop },
-       { 0x0198, nv04_graph_mthd_bind_beta1 },
-       { 0x019c, nv04_graph_mthd_bind_beta4 },
-       { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-       { 0x03e4, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
+       { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+       { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_ifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv01_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_ifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_patt },
-       { 0x0190, nv04_graph_mthd_bind_rop },
-       { 0x0194, nv04_graph_mthd_bind_beta1 },
-       { 0x0198, nv04_graph_mthd_bind_beta4 },
-       { 0x019c, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
+       { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifc_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_chroma },
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_sifm_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x0304, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_sifm_omthds[] = {
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x0304, nv04_graph_mthd_set_operation },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_surf3d_omthds[] = {
-       { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-       { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+       { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
        {}
 };
 
 static struct nouveau_omthds
 nv03_graph_ttri_omthds[] = {
-       { 0x0188, nv01_graph_mthd_bind_clip },
-       { 0x018c, nv04_graph_mthd_bind_surf_color },
-       { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
        {}
 };
 
 static struct nouveau_omthds
 nv01_graph_prim_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_clip },
-       { 0x0188, nv01_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_surf_dst },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+       { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
 static struct nouveau_omthds
 nv04_graph_prim_omthds[] = {
-       { 0x0184, nv01_graph_mthd_bind_clip },
-       { 0x0188, nv04_graph_mthd_bind_patt },
-       { 0x018c, nv04_graph_mthd_bind_rop },
-       { 0x0190, nv04_graph_mthd_bind_beta1 },
-       { 0x0194, nv04_graph_mthd_bind_beta4 },
-       { 0x0198, nv04_graph_mthd_bind_surf2d },
-       { 0x02fc, nv04_graph_mthd_set_operation },
+       { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
+       { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
+       { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
+       { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
+       { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
+       { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
+       { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
        {}
 };
 
index 92521c8..5c0f843 100644 (file)
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv17_celcius_omthds[] = {
-       { 0x1638, nv17_graph_mthd_lma_window },
-       { 0x163c, nv17_graph_mthd_lma_window },
-       { 0x1640, nv17_graph_mthd_lma_window },
-       { 0x1644, nv17_graph_mthd_lma_window },
-       { 0x1658, nv17_graph_mthd_lma_enable },
+       { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
+       { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
+       { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
+       { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
+       { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
        {}
 };
 
index 8f3f619..5b20401 100644 (file)
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
        nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
        nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
 
-       if (nv_device(engine)->card_type == NV_20) {
+       if (nv_device(engine)->chipset != 0x34) {
                nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
                nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
                nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv10_graph_intr_name, show);
                printk(" nsource:");
                nouveau_bitfield_print(nv04_graph_nsource, nsource);
                printk(" nstatus:");
                nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
                printk("\n");
-               nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+               nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
                        chid, subc, class, mthd, data);
        }
 
index cc6574e..0b36dd3 100644 (file)
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
 
        switch (nv_device(priv)->chipset) {
        case 0x40:
-       case 0x41: /* guess */
+       case 0x41:
        case 0x42:
        case 0x43:
-       case 0x45: /* guess */
+       case 0x45:
        case 0x4e:
                nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
                nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+               switch (nv_device(priv)->chipset) {
+               case 0x40:
+               case 0x45:
+                       nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+                       nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               case 0x41:
+               case 0x42:
+               case 0x43:
+                       nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
+                       nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               default:
+                       break;
+               }
                break;
        case 0x44:
        case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
                nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
                break;
        case 0x46:
+       case 0x4c:
        case 0x47:
        case 0x49:
        case 0x4b:
-       case 0x4c:
+       case 0x63:
        case 0x67:
-       default:
+       case 0x68:
                nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
                nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
                nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
                nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
                nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
                nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
+               switch (nv_device(priv)->chipset) {
+               case 0x47:
+               case 0x49:
+               case 0x4b:
+                       nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
+                       nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
                break;
        }
 
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv10_graph_intr_name, show);
                printk(" nsource:");
                nouveau_bitfield_print(nv04_graph_nsource, nsource);
index ab3b9dc..b1c3d83 100644 (file)
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
        return 0;
 }
 
+static const struct nouveau_bitfield nv50_pgraph_status[] = {
+       { 0x00000001, "BUSY" }, /* set when any bit is set */
+       { 0x00000002, "DISPATCH" },
+       { 0x00000004, "UNK2" },
+       { 0x00000008, "UNK3" },
+       { 0x00000010, "UNK4" },
+       { 0x00000020, "UNK5" },
+       { 0x00000040, "M2MF" },
+       { 0x00000080, "UNK7" },
+       { 0x00000100, "CTXPROG" },
+       { 0x00000200, "VFETCH" },
+       { 0x00000400, "CCACHE_UNK4" },
+       { 0x00000800, "STRMOUT_GSCHED_UNK5" },
+       { 0x00001000, "UNK14XX" },
+       { 0x00002000, "UNK24XX_CSCHED" },
+       { 0x00004000, "UNK1CXX" },
+       { 0x00008000, "CLIPID" },
+       { 0x00010000, "ZCULL" },
+       { 0x00020000, "ENG2D" },
+       { 0x00040000, "UNK34XX" },
+       { 0x00080000, "TPRAST" },
+       { 0x00100000, "TPROP" },
+       { 0x00200000, "TEX" },
+       { 0x00400000, "TPVP" },
+       { 0x00800000, "MP" },
+       { 0x01000000, "ROP" },
+       {}
+};
+
+static const char *const nv50_pgraph_vstatus_0[] = {
+       "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_1[] = {
+       "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
+};
+
+static const char *const nv50_pgraph_vstatus_2[] = {
+       "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
+       "ROP", NULL
+};
+
+static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
+               const char *const units[], u32 status)
+{
+       int i;
+
+       nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
+
+       for (i = 0; units[i] && status; i++) {
+               if ((status & 7) == 1)
+                       pr_cont(" %s", units[i]);
+               status >>= 3;
+       }
+       if (status)
+               pr_cont(" (invalid: 0x%x)", status);
+       pr_cont("\n");
+}
+
 static int
 nv84_graph_tlb_flush(struct nouveau_engine *engine)
 {
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
                 !(timeout = ptimer->read(ptimer) - start > 2000000000));
 
        if (timeout) {
-               nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
-                             "0x%08x 0x%08x 0x%08x 0x%08x\n",
-                        nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
-                        nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
+               nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
+
+               tmp = nv_rd32(priv, 0x400700);
+               nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
+               nouveau_bitfield_print(nv50_pgraph_status, tmp);
+               pr_cont("\n");
+
+               nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
+                               nv_rd32(priv, 0x400380));
+               nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
+                               nv_rd32(priv, 0x400384));
+               nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
+                               nv_rd32(priv, 0x400388));
        }
 
        nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
                }
                if (ustatus) {
                        if (display)
-                               nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+                               nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
                }
                nv_wr32(priv, ustatus_addr, 0xc0000000);
        }
 
        if (!tps && display)
-               nv_info(priv, "%s - No TPs claiming errors?\n", name);
+               nv_warn(priv, "%s - No TPs claiming errors?\n", name);
 }
 
 static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
        nv_wr32(priv, 0x400500, 0x00010001);
 
        if (show) {
-               nv_info(priv, "");
+               nv_error(priv, "");
                nouveau_bitfield_print(nv50_graph_intr_name, show);
                printk("\n");
                nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
                               "mthd 0x%04x data 0x%08x\n",
                         chid, (u64)inst << 12, subc, class, mthd, data);
-               nv50_fb_trap(nouveau_fb(priv), 1);
        }
 
        if (nv_rd32(priv, 0x400824) & (1 << 31))
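
For reference, the VSTATUS decoding added above packs one unit per 3-bit field, bottom bits first; a field value of 1 selects the unit name for printing, and the meaning of the other field values is not spelled out in this patch. A self-contained user-space version of the same loop:

#include <stdint.h>
#include <stdio.h>

static const char *const vstatus_0[] = {
	"VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
};

static void vstatus_print(int r, const char *const units[], uint32_t status)
{
	int i;

	printf("PGRAPH_VSTATUS%d: 0x%08x", r, status);
	for (i = 0; units[i] && status; i++) {
		if ((status & 7) == 1)		/* only field value 1 is named */
			printf(" %s", units[i]);
		status >>= 3;
	}
	if (status)		/* bits left past the last named unit */
		printf(" (invalid: 0x%x)", status);
	printf("\n");
}

int main(void)
{
	vstatus_print(0, vstatus_0, 0x00000009);	/* -> VFETCH CCACHE */
	return 0;
}
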
index c62f2d0..47a0208 100644 (file)
@@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
                nv_wr32(priv, 0x41a100, 0x00000002);
                nv_wr32(priv, 0x409100, 0x00000002);
                if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
-                       nv_info(priv, "0x409800 wait failed\n");
+                       nv_warn(priv, "0x409800 wait failed\n");
 
                nv_wr32(priv, 0x409840, 0xffffffff);
                nv_wr32(priv, 0x409500, 0x7fffffff);
index 9c715a2..fde8e24 100644 (file)
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
 #define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP0(i)                              (0x004009c0 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
 #define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
 #define NV04_PGRAPH_V_RAM                                  0x00400D40
 #define NV04_PGRAPH_W_RAM                                  0x00400D80
+#define NV47_PGRAPH_ZCOMP0(i)                              (0x00400e00 + 4*(i))
 #define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
 #define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
 #define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
 #define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
 #define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
 #define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
+#define NV47_PGRAPH_ZCOMP1(i)                              (0x004068c0 + 4*(i))
 #define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
 #define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
 #define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
 #define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
+#define NV40_PGRAPH_ZCOMP1(i)                              (0x00406980 + 4*(i))
+#define NV41_PGRAPH_ZCOMP1(i)                              (0x004069c0 + 4*(i))
 
 #endif
index 1f394a2..9fd8637 100644 (file)
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
 
 static struct nouveau_omthds
 nv31_mpeg_omthds[] = {
-       { 0x0190, nv31_mpeg_mthd_dma },
-       { 0x01a0, nv31_mpeg_mthd_dma },
-       { 0x01b0, nv31_mpeg_mthd_dma },
+       { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
+       { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
+       { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
        {}
 };
 
index 8678a99..bc7d12b 100644 (file)
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
 
        nv_wr32(priv, 0x00b100, stat);
        nv_wr32(priv, 0x00b230, 0x00000001);
-       nv50_fb_trap(nouveau_fb(priv), 1);
 }
 
 static void
index 50e7e0d..5a5b2a7 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
+#include <core/engine.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/ppp.h>
 
 struct nv98_ppp_priv {
-       struct nouveau_ppp base;
+       struct nouveau_engine base;
 };
 
 struct nv98_ppp_chan {
-       struct nouveau_ppp_chan base;
+       struct nouveau_engctx base;
 };
 
 /*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
  * PPPP context
  ******************************************************************************/
 
-static int
-nv98_ppp_context_ctor(struct nouveau_object *parent,
-                     struct nouveau_object *engine,
-                     struct nouveau_oclass *oclass, void *data, u32 size,
-                     struct nouveau_object **pobject)
-{
-       struct nv98_ppp_chan *priv;
-       int ret;
-
-       ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
-                                        0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv98_ppp_context_dtor(struct nouveau_object *object)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       nouveau_ppp_context_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_context_init(struct nouveau_object *object)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_ppp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv98_ppp_chan *priv = (void *)object;
-       return nouveau_ppp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv98_ppp_cclass = {
        .handle = NV_ENGCTX(PPP, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv98_ppp_context_ctor,
-               .dtor = nv98_ppp_context_dtor,
-               .init = nv98_ppp_context_init,
-               .fini = nv98_ppp_context_fini,
-               .rd32 = _nouveau_ppp_context_rd32,
-               .wr32 = _nouveau_ppp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
  * PPPP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv98_ppp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
              struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv98_ppp_priv *priv;
        int ret;
 
-       ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PPPP", "ppp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x00400002;
-       nv_subdev(priv)->intr = nv98_ppp_intr;
        nv_engine(priv)->cclass = &nv98_ppp_cclass;
        nv_engine(priv)->sclass = nv98_ppp_sclass;
        return 0;
 }
 
-static void
-nv98_ppp_dtor(struct nouveau_object *object)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       nouveau_ppp_destroy(&priv->base);
-}
-
-static int
-nv98_ppp_init(struct nouveau_object *object)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_ppp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv98_ppp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv98_ppp_priv *priv = (void *)object;
-       return nouveau_ppp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv98_ppp_oclass = {
        .handle = NV_ENGINE(PPP, 0x98),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv98_ppp_ctor,
-               .dtor = nv98_ppp_dtor,
-               .init = nv98_ppp_init,
-               .fini = nv98_ppp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
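
The nv98 PPP rework above is the recurring pattern in this series: hand-written ctor/dtor/init/fini wrappers that only forwarded to the base helpers are deleted, and the ofuncs table points straight at the shared _nouveau_engctx_*/_nouveau_engine_* implementations. A compact sketch of why that is equivalent (the names here are illustrative, not the driver's):

#include <stdio.h>

struct object { const char *name; };

struct ofuncs { int (*init)(struct object *); };

static int generic_init(struct object *o)	/* shared base implementation */
{
	printf("init %s\n", o->name);
	return 0;
}

/* Before: a per-engine wrapper adding nothing over the base helper. */
static int ppp_init(struct object *o)
{
	return generic_init(o);
}

/* After: the table references the generic helper directly. */
static const struct ofuncs ppp_ofuncs = { .init = generic_init };

int main(void)
{
	struct object o = { "ppp" };
	(void)ppp_init;		/* the wrapper becomes dead code */
	return ppp_ofuncs.init(&o);
}
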
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644 (file)
index 0000000..ebf0d86
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/ppp.h>
+
+struct nvc0_ppp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_sclass[] = {
+       { 0x90b3, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_ppp_cclass = {
+       .handle = NV_ENGCTX(PPP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_ppp_init(struct nouveau_object *object)
+{
+       struct nvc0_ppp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x086010, 0x0000fff2);
+       nv_wr32(priv, 0x08601c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+             struct nouveau_oclass *oclass, void *data, u32 size,
+             struct nouveau_object **pobject)
+{
+       struct nvc0_ppp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
+                                   "PPPP", "ppp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00000002;
+       nv_engine(priv)->cclass = &nvc0_ppp_cclass;
+       nv_engine(priv)->sclass = nvc0_ppp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_ppp_oclass = {
+       .handle = NV_ENGINE(PPP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_ppp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_ppp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
index 3ca4c3a..2a859a3 100644 (file)
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv04_software_omthds[] = {
-       { 0x0150, nv04_software_set_ref },
-       { 0x0500, nv04_software_flip },
+       { 0x0150, 0x0150, nv04_software_set_ref },
+       { 0x0500, 0x0500, nv04_software_flip },
        {}
 };
 
index 6e699af..a019364 100644 (file)
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv10_software_omthds[] = {
-       { 0x0500, nv10_software_flip },
+       { 0x0500, 0x0500, nv10_software_flip },
        {}
 };
 
index a2edcd3..b0e7e1c 100644 (file)
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nv50_software_omthds[] = {
-       { 0x018c, nv50_software_mthd_dma_vblsem },
-       { 0x0400, nv50_software_mthd_vblsem_offset },
-       { 0x0404, nv50_software_mthd_vblsem_value },
-       { 0x0408, nv50_software_mthd_vblsem_release },
-       { 0x0500, nv50_software_mthd_flip },
+       { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
+       { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
+       { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
+       { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
+       { 0x0500, 0x0500, nv50_software_mthd_flip },
        {}
 };
 
index b7b0d7e..282a1cd 100644 (file)
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
 
 static struct nouveau_omthds
 nvc0_software_omthds[] = {
-       { 0x0400, nvc0_software_mthd_vblsem_offset },
-       { 0x0404, nvc0_software_mthd_vblsem_offset },
-       { 0x0408, nvc0_software_mthd_vblsem_value },
-       { 0x040c, nvc0_software_mthd_vblsem_release },
-       { 0x0500, nvc0_software_mthd_flip },
+       { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
+       { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
+       { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
+       { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
+       { 0x0500, 0x0500, nvc0_software_mthd_flip },
        {}
 };
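
These software-class method tables (and the mpeg table earlier) gain a second column because struct nouveau_omthds now describes a [start, limit] method range rather than a single method number; the struct and dispatch changes are in the core/object.h hunk below, with a sketch after it.
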
 
index dd23c80..261cd96 100644 (file)
  * Authors: Ben Skeggs
  */
 
-#include <core/os.h>
-#include <core/class.h>
 #include <core/engctx.h>
+#include <core/class.h>
 
 #include <engine/vp.h>
 
 struct nv84_vp_priv {
-       struct nouveau_vp base;
-};
-
-struct nv84_vp_chan {
-       struct nouveau_vp_chan base;
+       struct nouveau_engine base;
 };
 
 /*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
  * PVP context
  ******************************************************************************/
 
-static int
-nv84_vp_context_ctor(struct nouveau_object *parent,
-                    struct nouveau_object *engine,
-                    struct nouveau_oclass *oclass, void *data, u32 size,
-                    struct nouveau_object **pobject)
-{
-       struct nv84_vp_chan *priv;
-       int ret;
-
-       ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
-                                       0, 0, 0, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void
-nv84_vp_context_dtor(struct nouveau_object *object)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       nouveau_vp_context_destroy(&priv->base);
-}
-
-static int
-nv84_vp_context_init(struct nouveau_object *object)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_vp_context_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_vp_chan *priv = (void *)object;
-       return nouveau_vp_context_fini(&priv->base, suspend);
-}
-
 static struct nouveau_oclass
 nv84_vp_cclass = {
        .handle = NV_ENGCTX(VP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv84_vp_context_ctor,
-               .dtor = nv84_vp_context_dtor,
-               .init = nv84_vp_context_init,
-               .fini = nv84_vp_context_fini,
-               .rd32 = _nouveau_vp_context_rd32,
-               .wr32 = _nouveau_vp_context_wr32,
+               .ctor = _nouveau_engctx_ctor,
+               .dtor = _nouveau_engctx_dtor,
+               .init = _nouveau_engctx_init,
+               .fini = _nouveau_engctx_fini,
+               .rd32 = _nouveau_engctx_rd32,
+               .wr32 = _nouveau_engctx_wr32,
        },
 };
 
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
  * PVP engine/subdev functions
  ******************************************************************************/
 
-static void
-nv84_vp_intr(struct nouveau_subdev *subdev)
-{
-}
-
 static int
 nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv84_vp_priv *priv;
        int ret;
 
-       ret = nouveau_vp_create(parent, engine, oclass, &priv);
+       ret = nouveau_engine_create(parent, engine, oclass, true,
+                                   "PVP", "vp", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
        nv_subdev(priv)->unit = 0x01020000;
-       nv_subdev(priv)->intr = nv84_vp_intr;
        nv_engine(priv)->cclass = &nv84_vp_cclass;
        nv_engine(priv)->sclass = nv84_vp_sclass;
        return 0;
 }
 
-static void
-nv84_vp_dtor(struct nouveau_object *object)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       nouveau_vp_destroy(&priv->base);
-}
-
-static int
-nv84_vp_init(struct nouveau_object *object)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_vp_init(&priv->base);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-nv84_vp_fini(struct nouveau_object *object, bool suspend)
-{
-       struct nv84_vp_priv *priv = (void *)object;
-       return nouveau_vp_fini(&priv->base, suspend);
-}
-
 struct nouveau_oclass
 nv84_vp_oclass = {
        .handle = NV_ENGINE(VP, 0x84),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv84_vp_ctor,
-               .dtor = nv84_vp_dtor,
-               .init = nv84_vp_init,
-               .fini = nv84_vp_fini,
+               .dtor = _nouveau_engine_dtor,
+               .init = _nouveau_engine_init,
+               .fini = _nouveau_engine_fini,
        },
 };
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644 (file)
index 0000000..f761949
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Maarten Lankhorst
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Maarten Lankhorst
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nvc0_vp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_sclass[] = {
+       { 0x90b2, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_vp_cclass = {
+       .handle = NV_ENGCTX(VP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_vp_init(struct nouveau_object *object)
+{
+       struct nvc0_vp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x085010, 0x0000fff2);
+       nv_wr32(priv, 0x08501c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nvc0_vp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+                                   "PVP", "vp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00020000;
+       nv_engine(priv)->cclass = &nvc0_vp_cclass;
+       nv_engine(priv)->sclass = nvc0_vp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nvc0_vp_oclass = {
+       .handle = NV_ENGINE(VP, 0xc0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nvc0_vp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nvc0_vp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644 (file)
index 0000000..2384ce5
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/falcon.h>
+
+#include <engine/vp.h>
+
+struct nve0_vp_priv {
+       struct nouveau_falcon base;
+};
+
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_sclass[] = {
+       { 0x95b2, &nouveau_object_ofuncs },
+       {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_vp_cclass = {
+       .handle = NV_ENGCTX(VP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_falcon_context_ctor,
+               .dtor = _nouveau_falcon_context_dtor,
+               .init = _nouveau_falcon_context_init,
+               .fini = _nouveau_falcon_context_fini,
+               .rd32 = _nouveau_falcon_context_rd32,
+               .wr32 = _nouveau_falcon_context_wr32,
+       },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
+static int
+nve0_vp_init(struct nouveau_object *object)
+{
+       struct nve0_vp_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_falcon_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x085010, 0x0000fff2);
+       nv_wr32(priv, 0x08501c, 0x0000fff2);
+       return 0;
+}
+
+static int
+nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nve0_vp_priv *priv;
+       int ret;
+
+       ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
+                                   "PVP", "vp", &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       nv_subdev(priv)->unit = 0x00020000;
+       nv_engine(priv)->cclass = &nve0_vp_cclass;
+       nv_engine(priv)->sclass = nve0_vp_sclass;
+       return 0;
+}
+
+struct nouveau_oclass
+nve0_vp_oclass = {
+       .handle = NV_ENGINE(VP, 0xe0),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nve0_vp_ctor,
+               .dtor = _nouveau_falcon_dtor,
+               .init = nve0_vp_init,
+               .fini = _nouveau_falcon_fini,
+               .rd32 = _nouveau_falcon_rd32,
+               .wr32 = _nouveau_falcon_wr32,
+       },
+};
index 6180ae9..47c4b3a 100644 (file)
@@ -23,6 +23,7 @@
 #define NV_DEVICE_DISABLE_COPY0                           0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1                           0x0000010000000000ULL
 #define NV_DEVICE_DISABLE_UNK1C1                          0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC                            0x0000040000000000ULL
 
 struct nv_device_class {
        u64 device;     /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
 #define NV_DMA_ACCESS_WR                                             0x00000200
 #define NV_DMA_ACCESS_RDWR                                           0x00000300
 
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE                                        0x80000000
+#define NV50_DMA_CONF0_PRIV                                          0x00300000
+#define NV50_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NV50_DMA_CONF0_PRIV_US                                       0x00100000
+#define NV50_DMA_CONF0_PRIV__S                                       0x00200000
+#define NV50_DMA_CONF0_PART                                          0x00030000
+#define NV50_DMA_CONF0_PART_VM                                       0x00000000
+#define NV50_DMA_CONF0_PART_256                                      0x00010000
+#define NV50_DMA_CONF0_PART_1KB                                      0x00020000
+#define NV50_DMA_CONF0_COMP                                          0x00000180
+#define NV50_DMA_CONF0_COMP_NONE                                     0x00000000
+#define NV50_DMA_CONF0_COMP_VM                                       0x00000180
+#define NV50_DMA_CONF0_TYPE                                          0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NV50_DMA_CONF0_TYPE_VM                                       0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVC0_DMA_CONF0_PRIV                                          0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM                                       0x00000000
+#define NVC0_DMA_CONF0_PRIV_US                                       0x00100000
+#define NVC0_DMA_CONF0_PRIV__S                                       0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */                              0x00030000
+#define NVC0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE                                        0x80000000
+#define NVD0_DMA_CONF0_PAGE                                          0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP                                       0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP                                       0x00000400
+#define NVD0_DMA_CONF0_TYPE                                          0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR                                   0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM                                       0x000000ff
+
 struct nv_dma_class {
        u32 flags;
        u32 pad0;
        u64 start;
        u64 limit;
+       u32 conf0;
 };
 
 /* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
        u32 engine;
 };
 
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS                                              0x00005070
+#define NV84_DISP_CLASS                                              0x00008270
+#define NVA0_DISP_CLASS                                              0x00008370
+#define NV94_DISP_CLASS                                              0x00008870
+#define NVA3_DISP_CLASS                                              0x00008570
+#define NVD0_DISP_CLASS                                              0x00009070
+#define NVE0_DISP_CLASS                                              0x00009170
+
+#define NV50_DISP_SOR_MTHD                                           0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD                                      0x00000018
+#define NV50_DISP_SOR_MTHD_LINK                                      0x00000004
+#define NV50_DISP_SOR_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_SOR_PWR                                            0x00010000
+#define NV50_DISP_SOR_PWR_STATE                                      0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON                                   0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF                                  0x00000000
+#define NVA3_DISP_SOR_HDA_ELD                                        0x00010100
+#define NV84_DISP_SOR_HDMI_PWR                                       0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE                                 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF                             0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON                              0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET                         0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY                                 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT                                    0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID                                 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN                                       0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP                                    0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN                            0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT                               0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI                               0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD                           0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF                       0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON                        0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN                               0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED                      0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL                                      0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME                                0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD                            0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH                            0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH                                0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT                                0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l)                     ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS                                   0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE                                   0x00000003
+
+#define NV50_DISP_DAC_MTHD                                           0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE                                      0x0000f000
+#define NV50_DISP_DAC_MTHD_OR                                        0x00000003
+
+#define NV50_DISP_DAC_PWR                                            0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC                                      0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO                                   0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC                                      0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO                                   0x00000004
+#define NV50_DISP_DAC_PWR_DATA                                       0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON                                    0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO                                    0x00000010
+#define NV50_DISP_DAC_PWR_STATE                                      0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON                                   0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF                                  0x00000040
+#define NV50_DISP_DAC_LOAD                                           0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE                                     0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS                                         0x0000507a
+#define NV84_DISP_CURS_CLASS                                         0x0000827a
+#define NVA0_DISP_CURS_CLASS                                         0x0000837a
+#define NV94_DISP_CURS_CLASS                                         0x0000887a
+#define NVA3_DISP_CURS_CLASS                                         0x0000857a
+#define NVD0_DISP_CURS_CLASS                                         0x0000907a
+#define NVE0_DISP_CURS_CLASS                                         0x0000917a
+
+struct nv50_display_curs_class {
+       u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS                                         0x0000507b
+#define NV84_DISP_OIMM_CLASS                                         0x0000827b
+#define NVA0_DISP_OIMM_CLASS                                         0x0000837b
+#define NV94_DISP_OIMM_CLASS                                         0x0000887b
+#define NVA3_DISP_OIMM_CLASS                                         0x0000857b
+#define NVD0_DISP_OIMM_CLASS                                         0x0000907b
+#define NVE0_DISP_OIMM_CLASS                                         0x0000917b
+
+struct nv50_display_oimm_class {
+       u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS                                         0x0000507c
+#define NV84_DISP_SYNC_CLASS                                         0x0000827c
+#define NVA0_DISP_SYNC_CLASS                                         0x0000837c
+#define NV94_DISP_SYNC_CLASS                                         0x0000887c
+#define NVA3_DISP_SYNC_CLASS                                         0x0000857c
+#define NVD0_DISP_SYNC_CLASS                                         0x0000907c
+#define NVE0_DISP_SYNC_CLASS                                         0x0000917c
+
+struct nv50_display_sync_class {
+       u32 pushbuf;
+       u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS                                         0x0000507d
+#define NV84_DISP_MAST_CLASS                                         0x0000827d
+#define NVA0_DISP_MAST_CLASS                                         0x0000837d
+#define NV94_DISP_MAST_CLASS                                         0x0000887d
+#define NVA3_DISP_MAST_CLASS                                         0x0000857d
+#define NVD0_DISP_MAST_CLASS                                         0x0000907d
+#define NVE0_DISP_MAST_CLASS                                         0x0000917d
+
+struct nv50_display_mast_class {
+       u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS                                         0x0000507e
+#define NV84_DISP_OVLY_CLASS                                         0x0000827e
+#define NVA0_DISP_OVLY_CLASS                                         0x0000837e
+#define NV94_DISP_OVLY_CLASS                                         0x0000887e
+#define NVA3_DISP_OVLY_CLASS                                         0x0000857e
+#define NVD0_DISP_OVLY_CLASS                                         0x0000907e
+#define NVE0_DISP_OVLY_CLASS                                         0x0000917e
+
+struct nv50_display_ovly_class {
+       u32 pushbuf;
+       u32 head;
+};
+
 #endif
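
The new CONF0 defines let callers describe page-table versus linear DMA objects per generation. As a worked example, an NV50-generation conf0 for an enabled, linear, uncompressed object with 256-byte partitioning would be composed as below, with values taken straight from the defines above; whether the hardware accepts this particular combination is not something the header states:

#include <stdint.h>
#include <stdio.h>

#define NV50_DMA_CONF0_ENABLE		0x80000000
#define NV50_DMA_CONF0_PART_256		0x00010000
#define NV50_DMA_CONF0_COMP_NONE	0x00000000
#define NV50_DMA_CONF0_TYPE_LINEAR	0x00000000

int main(void)
{
	uint32_t conf0 = NV50_DMA_CONF0_ENABLE | NV50_DMA_CONF0_PART_256 |
			 NV50_DMA_CONF0_COMP_NONE | NV50_DMA_CONF0_TYPE_LINEAR;
	printf("conf0 = 0x%08x\n", conf0);	/* 0x80010000 */
	return 0;
}
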
index 8a947b6..2fd48b5 100644 (file)
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
 int  nouveau_engctx_init(struct nouveau_engctx *);
 int  nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
 
+int  _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+                         struct nouveau_oclass *, void *, u32,
+                         struct nouveau_object **);
 void _nouveau_engctx_dtor(struct nouveau_object *);
 int  _nouveau_engctx_init(struct nouveau_object *);
 int  _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644 (file)
index 0000000..1edec38
--- /dev/null
@@ -0,0 +1,81 @@
+#ifndef __NOUVEAU_FALCON_H__
+#define __NOUVEAU_FALCON_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
+
+struct nouveau_falcon_chan {
+       struct nouveau_engctx base;
+};
+
+#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d)                         \
+       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_falcon_context_destroy(d)                                      \
+       nouveau_engctx_destroy(&(d)->base)
+#define nouveau_falcon_context_init(d)                                         \
+       nouveau_engctx_init(&(d)->base)
+#define nouveau_falcon_context_fini(d,s)                                       \
+       nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
+#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
+#define _nouveau_falcon_context_init _nouveau_engctx_init
+#define _nouveau_falcon_context_fini _nouveau_engctx_fini
+#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_falcon_data {
+       bool external;
+};
+
+struct nouveau_falcon {
+       struct nouveau_engine base;
+
+       u32 addr;
+       u8  version;
+       u8  secret;
+
+       struct nouveau_gpuobj *core;
+       bool external;
+
+       struct {
+               u32 limit;
+               u32 *data;
+               u32  size;
+       } code;
+
+       struct {
+               u32 limit;
+               u32 *data;
+               u32  size;
+       } data;
+};
+
+#define nv_falcon(priv) (&(priv)->base)
+
+#define nouveau_falcon_create(p,e,c,b,d,i,f,r)                                 \
+       nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f),              \
+                              sizeof(**r),(void **)r)
+#define nouveau_falcon_destroy(p)                                              \
+       nouveau_engine_destroy(&(p)->base)
+#define nouveau_falcon_init(p) ({                                              \
+       struct nouveau_falcon *falcon = (p);                                   \
+       _nouveau_falcon_init(nv_object(falcon));                               \
+})
+#define nouveau_falcon_fini(p,s) ({                                            \
+       struct nouveau_falcon *falcon = (p);                                   \
+       _nouveau_falcon_fini(nv_object(falcon), (s));                          \
+})
+
+int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
+                          struct nouveau_oclass *, u32, bool, const char *,
+                          const char *, int, void **);
+
+#define _nouveau_falcon_dtor _nouveau_engine_dtor
+int  _nouveau_falcon_init(struct nouveau_object *);
+int  _nouveau_falcon_fini(struct nouveau_object *, bool);
+u32  _nouveau_falcon_rd32(struct nouveau_object *, u64);
+void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
+
+#endif
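
Two details in falcon.h are worth noting: nouveau_falcon_create() sizes the allocation from the caller's pointer type (sizeof(**r)), and nouveau_falcon_init()/_fini() are GNU statement-expression macros, so assigning the argument to a typed local gives type checking while the macro still yields the helper's return value. A user-space sketch of the latter (GCC/Clang only):

#include <stdio.h>

struct falcon { int id; };

static int _falcon_init(struct falcon *f)
{
	printf("init falcon %d\n", f->id);
	return 0;
}

#define falcon_init(p) ({				\
	struct falcon *falcon = (p);	/* type check */\
	_falcon_init(falcon);		/* macro value */\
})

int main(void)
{
	struct falcon f = { .id = 3 };
	return falcon_init(&f);
}
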
index 6eaff79..b3b9ce4 100644 (file)
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
 void _nouveau_gpuobj_dtor(struct nouveau_object *);
 int  _nouveau_gpuobj_init(struct nouveau_object *);
 int  _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
 
 #endif
index 975137b..2514e81 100644 (file)
@@ -21,6 +21,12 @@ struct nouveau_mm {
        int heap_nodes;
 };
 
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+       return mm->block_size != 0;
+}
+
 int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
 int  nouveau_mm_fini(struct nouveau_mm *);
 int  nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
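
nouveau_mm_initialised() relies on block_size staying zero until nouveau_mm_init() runs, so a zero-filled structure reads as not yet initialised; presumably this lets callers probe embedded nouveau_mm instances without a separate flag. A minimal sketch:

#include <stdbool.h>
#include <stdio.h>

struct mm { unsigned int block_size; };	/* stand-in for struct nouveau_mm */

static bool mm_initialised(const struct mm *mm)
{
	return mm->block_size != 0;
}

int main(void)
{
	struct mm mm = { 0 };
	printf("%d\n", mm_initialised(&mm));	/* 0: zeroed == uninitialised */
	mm.block_size = 0x1000;			/* what *_init() would set */
	printf("%d\n", mm_initialised(&mm));	/* 1 */
	return 0;
}
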
index 486f1a9..5982935 100644 (file)
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
 }
 
 struct nouveau_omthds {
-       u32 method;
+       u32 start;
+       u32 limit;
        int (*call)(struct nouveau_object *, u32, void *, u32);
 };
 
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
        void (*dtor)(struct nouveau_object *);
        int  (*init)(struct nouveau_object *);
        int  (*fini)(struct nouveau_object *, bool suspend);
-       u8   (*rd08)(struct nouveau_object *, u32 offset);
-       u16  (*rd16)(struct nouveau_object *, u32 offset);
-       u32  (*rd32)(struct nouveau_object *, u32 offset);
-       void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
-       void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
-       void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+       u8   (*rd08)(struct nouveau_object *, u64 offset);
+       u16  (*rd16)(struct nouveau_object *, u64 offset);
+       u32  (*rd32)(struct nouveau_object *, u64 offset);
+       void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+       void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+       void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
 };
 
 static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
 void nouveau_object_debug(void);
 
 static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
 {
        struct nouveau_omthds *method = nv_oclass(obj)->omthds;
 
        while (method && method->call) {
-               if (method->method == mthd)
-                       return method->call(obj, mthd, &data, sizeof(data));
+               if (mthd >= method->start && mthd <= method->limit)
+                       return method->call(obj, mthd, data, size);
                method++;
        }
 
        return -EINVAL;
 }
 
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+       return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
 static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
 {
        u8 data = nv_ofuncs(obj)->rd08(obj, addr);
        nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
 }
 
 static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
 {
        u16 data = nv_ofuncs(obj)->rd16(obj, addr);
        nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
 }
 
 static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
 {
        u32 data = nv_ofuncs(obj)->rd32(obj, addr);
        nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +154,28 @@ nv_ro32(void *obj, u32 addr)
 }
 
 static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
 {
        nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
        nv_ofuncs(obj)->wr08(obj, addr, data);
 }
 
 static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
 {
        nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
        nv_ofuncs(obj)->wr16(obj, addr, data);
 }
 
 static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
 {
        nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
        nv_ofuncs(obj)->wr32(obj, addr, data);
 }
 
 static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
 {
        u32 temp = nv_ro32(obj, addr);
        nv_wo32(obj, addr, (temp & ~mask) | data);
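
Two observations on the object.h changes. First, the accessor offsets widen from u32 to u64 while the unchanged nv_spam() format strings keep printing them with 0x%08x; passing a u64 through a %x varargs slot is formally mismatched and would eventually want %llx plus a cast. Second, nv_call() becomes a thin wrapper over the new nv_exec(), and method lookup now matches a [start, limit] range instead of an exact method number. A self-contained sketch of that dispatch:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct omthds {
	uint32_t start;
	uint32_t limit;
	int (*call)(void *obj, uint32_t mthd, void *data, uint32_t size);
};

static int flip(void *obj, uint32_t mthd, void *data, uint32_t size)
{
	printf("flip: mthd 0x%04x\n", mthd);
	return 0;
}

static const struct omthds omthds[] = {
	{ 0x0500, 0x0500, flip },	/* single-method range */
	{}				/* terminator: call == NULL */
};

static int exec_mthd(void *obj, uint32_t mthd, void *data, uint32_t size)
{
	const struct omthds *m;

	for (m = omthds; m->call; m++) {
		if (mthd >= m->start && mthd <= m->limit)
			return m->call(obj, mthd, data, size);
	}
	return -EINVAL;
}

static int call_mthd(void *obj, uint32_t mthd, uint32_t data)
{
	return exec_mthd(obj, mthd, &data, sizeof(data));
}

int main(void)
{
	return call_mthd(NULL, 0x0500, 0);
}
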
index 3c2e940..31cd852 100644 (file)
@@ -14,7 +14,7 @@ struct nouveau_parent {
        struct nouveau_object base;
 
        struct nouveau_sclass *sclass;
-       u32 engine;
+       u64 engine;
 
        int  (*context_attach)(struct nouveau_object *,
                               struct nouveau_object *);
index 75d1ed5..13ccdf5 100644 (file)
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_BSP_H__
 #define __NOUVEAU_BSP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d)                            \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d)                                         \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d)                                            \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s)                                          \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d)                                            \
-       nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d)                                                 \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d)                                                    \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s)                                                  \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
 
 #endif
index 70b9d8c..8cad2cf 100644 (file)
@@ -1,44 +1,7 @@
 #ifndef __NOUVEAU_COPY_H__
 #define __NOUVEAU_COPY_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d)                           \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d)                                        \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d)                                           \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s)                                         \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
-       struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d)                                       \
-       nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d)                                                \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d)                                                   \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s)                                                 \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
 
 extern struct nouveau_oclass nva3_copy_oclass;
 extern struct nouveau_oclass nvc0_copy0_oclass;
index e367474..db97561 100644 (file)
@@ -1,45 +1,6 @@
 #ifndef __NOUVEAU_CRYPT_H__
 #define __NOUVEAU_CRYPT_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d)                          \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d)                                       \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d)                                          \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s)                                        \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
-       struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d)                                          \
-       nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d)                                               \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d)                                                  \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s)                                                \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_crypt_oclass;
 extern struct nouveau_oclass nv98_crypt_oclass;
 
index 38ec125..4694828 100644 (file)
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
 
 extern struct nouveau_oclass nv04_disp_oclass;
 extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb..b28914e 100644
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
        u32 access;
        u64 start;
        u64 limit;
+       u32 conf0;
 };
 
-#define nouveau_dmaobj_create(p,e,c,a,s,d)                                     \
-       nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p)                                              \
-       nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p)                                                 \
-       nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s)                                               \
-       nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
-                          struct nouveau_oclass *, void *data, u32 size,
-                          int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
 struct nouveau_dmaeng {
        struct nouveau_engine base;
-       int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
-                   struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+       /* creates a "physical" dma object from a struct nouveau_dmaobj */
+       int (*bind)(struct nouveau_dmaeng *dmaeng,
+                   struct nouveau_object *parent,
+                   struct nouveau_dmaobj *dmaobj,
+                   struct nouveau_gpuobj **);
 };
 
 #define nouveau_dmaeng_create(p,e,c,d)                                         \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
 extern struct nouveau_oclass nv04_dmaeng_oclass;
 extern struct nouveau_oclass nv50_dmaeng_oclass;
 extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
 
 #endif
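
The reworked nouveau_dmaeng above drops the old nouveau_dmaobj constructor
boilerplate in favour of a single bind hook. A minimal caller sketch (not part
of the patch; the example_bind_channel name is made up) of how the hook is
meant to be driven:

	/* turn software dma-object state into a channel-visible gpuobj */
	static int
	example_bind_channel(struct nouveau_dmaeng *dmaeng,
			     struct nouveau_object *parent,
			     struct nouveau_dmaobj *dmaobj,
			     struct nouveau_gpuobj **pgpuobj)
	{
		/* ->bind() creates the "physical" dma object; each chipset
		 * class (nv04/nv50/nvc0/nvd0 above) supplies its own version */
		return dmaeng->bind(dmaeng, parent, dmaobj, pgpuobj);
	}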
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1..f18846c 100644
@@ -33,15 +33,15 @@ int  nouveau_fifo_channel_create_(struct nouveau_object *,
                                  struct nouveau_object *,
                                  struct nouveau_oclass *,
                                  int bar, u32 addr, u32 size, u32 push,
-                                 u32 engmask, int len, void **);
+                                 u64 engmask, int len, void **);
 void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
 
 #define _nouveau_fifo_channel_init _nouveau_namedb_init
 #define _nouveau_fifo_channel_fini _nouveau_namedb_fini
 
 void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32  _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
 
 struct nouveau_fifo_base {
        struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554f..0a66781 100644
@@ -1,45 +1,7 @@
 #ifndef __NOUVEAU_PPP_H__
 #define __NOUVEAU_PPP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d)                            \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d)                                         \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d)                                            \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s)                                          \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d)                                            \
-       nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d)                                                 \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d)                                                    \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s)                                                  \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08f..d7b287b 100644
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_VP_H__
 #define __NOUVEAU_VP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
-       struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d)                             \
-       nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d)                                          \
-       nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d)                                             \
-       nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s)                                           \
-       nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
-       struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d)                                             \
-       nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d)                                                  \
-       nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d)                                                     \
-       nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s)                                                   \
-       nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb6..b79025d 100644
@@ -23,6 +23,7 @@ struct dcb_output {
        uint8_t bus;
        uint8_t location;
        uint8_t or;
+       uint8_t link;
        bool duallink_possible;
        union {
                struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
 
 u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
 u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+                  struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+                  struct dcb_output *);
 int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
                     (struct nouveau_bios *, void *, int index, u16 entry));
 
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
-       if ((hash & 0x000000f0) != (dcb->location << 4))
-               return false;
-       if ((hash & 0x0000000f) != dcb->type)
-               return false;
-       if (!(hash & (dcb->or << 16)))
-               return false;
-
-       switch (dcb->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_LVDS:
-       case DCB_OUTPUT_DP:
-               if (hash & 0x00c00000) {
-                       if (!(hash & (dcb->sorconf.link << 22)))
-                               return false;
-               }
-       default:
-               return true;
-       }
-}
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 0000000..c35937e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
+#ifndef __NVBIOS_DISP_H__
+#define __NVBIOS_DISP_H__
+
+u16 nvbios_disp_table(struct nouveau_bios *,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
+
+struct nvbios_disp {
+       u16 data;
+};
+
+u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr__, u8 *sub);
+u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr__, u8 *sub,
+                     struct nvbios_disp *);
+
+struct nvbios_outp {
+       u16 type;
+       u16 mask;
+       u16 script[3];
+};
+
+u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_outp *);
+u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_outp *);
+
+
+struct nvbios_ocfg {
+       u16 match;
+       u16 clkcmp[2];
+};
+
+u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
+u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_ocfg *);
+u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
+                     u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                     struct nvbios_ocfg *);
+u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d..6e54218 100644
@@ -1,8 +1,34 @@
 #ifndef __NVBIOS_DP_H__
 #define __NVBIOS_DP_H__
 
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+       u16 type;
+       u16 mask;
+       u8  flags;
+       u32 script[5];
+       u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                      struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                      struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+       u8 drv;
+       u8 pre;
+       u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1..da470e6 100644
@@ -69,8 +69,11 @@ struct nouveau_fb {
                } type;
                u64 stolen;
                u64 size;
+
                int ranks;
+               int parts;
 
+               int  (*init)(struct nouveau_fb *);
                int  (*get)(struct nouveau_fb *, u64 size, u32 align,
                            u32 size_nc, u32 type, struct nouveau_mem **);
                void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
                int regions;
                void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
                             u32 pitch, u32 flags, struct nouveau_fb_tile *);
+               void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+                            struct nouveau_fb_tile *);
                void (*fini)(struct nouveau_fb *, int i,
                             struct nouveau_fb_tile *);
                void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
 
 #define nouveau_fb_create(p,e,c,d)                                             \
        nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int  nouveau_fb_created(struct nouveau_fb *);
+int  nouveau_fb_preinit(struct nouveau_fb *);
 void nouveau_fb_destroy(struct nouveau_fb *);
 int  nouveau_fb_init(struct nouveau_fb *);
 #define nouveau_fb_fini(p,s)                                                   \
@@ -111,9 +116,19 @@ int  _nouveau_fb_init(struct nouveau_object *);
 
 extern struct nouveau_oclass nv04_fb_oclass;
 extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
 extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
 extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
 extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
 extern struct nouveau_oclass nv50_fb_oclass;
 extern struct nouveau_oclass nvc0_fb_oclass;
 
@@ -122,13 +137,35 @@ int  nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
 void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 
+int  nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv30_fb_init(struct nouveau_object *);
 void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+                      struct nouveau_fb_tile *);
+
+int  nv41_fb_vram_init(struct nouveau_fb *);
+int  nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int  nv44_fb_vram_init(struct nouveau_fb *);
+int  nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+                      u32 pitch, u32 flags, struct nouveau_fb_tile *);
 
 void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c53..d70ba34 100644
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_barobj *barobj = (void *)object;
        return ioread32_native(barobj->iomem + addr);
 }
 
 static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_barobj *barobj = (void *)object;
        iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5..dd11194 100644
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
        struct pci_dev *pdev = nv_device(bios)->pdev;
        struct device_node *dn;
        const u32 *data;
-       int size, i;
+       int size;
 
        dn = pci_device_to_OF_node(pdev);
        if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
                return;
 
        bios->data = kmalloc(bios->size, GFP_KERNEL);
-       for (i = 0; bios->data && i < bios->size; i += cnt) {
-               cnt = min((bios->size - i), (u32)4096);
-               ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-               if (ret != cnt)
-                       break;
+       if (bios->data) {
+               /* disobey the acpi spec - much faster on at least w530 ... */
+               ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+               if (ret != bios->size ||
+                   nvbios_checksum(bios->data, bios->size)) {
+                       /* ... that didn't work, ok, i'll be good now */
+                       for (i = 0; i < bios->size; i += cnt) {
+                               cnt = min((bios->size - i), (u32)4096);
+                               ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+                               if (ret != cnt)
+                                       break;
+                       }
+               }
        }
 }
 
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
 }
 
 static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return bios->data[addr];
 }
 
 static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return get_unaligned_le16(&bios->data[addr]);
 }
 
 static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nouveau_bios *bios = (void *)object;
        return get_unaligned_le32(&bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
        struct nouveau_bios *bios = (void *)object;
        bios->data[addr] = data;
 }
 
 static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
        struct nouveau_bios *bios = (void *)object;
        put_unaligned_le16(data, &bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nouveau_bios *bios = (void *)object;
        put_unaligned_le32(data, &bios->data[addr]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c511971..0fd87df 100644
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
        return 0x0000;
 }
 
+u16
+dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
+              struct dcb_output *outp)
+{
+       u16 dcb = dcb_outp(bios, idx, ver, len);
+       if (dcb) {
+               if (*ver >= 0x20) {
+                       u32 conn = nv_ro32(bios, dcb + 0x00);
+                       outp->or        = (conn & 0x0f000000) >> 24;
+                       outp->location  = (conn & 0x00300000) >> 20;
+                       outp->bus       = (conn & 0x000f0000) >> 16;
+                       outp->connector = (conn & 0x0000f000) >> 12;
+                       outp->heads     = (conn & 0x00000f00) >> 8;
+                       outp->i2c_index = (conn & 0x000000f0) >> 4;
+                       outp->type      = (conn & 0x0000000f);
+                       outp->link      = 0;
+               } else {
+                       dcb = 0x0000;
+               }
+
+               if (*ver >= 0x40) {
+                       u32 conf = nv_ro32(bios, dcb + 0x04);
+                       switch (outp->type) {
+                       case DCB_OUTPUT_TMDS:
+                       case DCB_OUTPUT_LVDS:
+                       case DCB_OUTPUT_DP:
+                               outp->link = (conf & 0x00000030) >> 4;
+                               outp->sorconf.link = outp->link; /*XXX*/
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+       return dcb;
+}
+
+static inline u16
+dcb_outp_hasht(struct dcb_output *outp)
+{
+       return outp->type;
+}
+
+static inline u16
+dcb_outp_hashm(struct dcb_output *outp)
+{
+       return (outp->heads << 8) | (outp->link << 6) | outp->or;
+}
+
+u16
+dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+              u8 *ver, u8 *len, struct dcb_output *outp)
+{
+       u16 dcb, idx = 0;
+       while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
+               if (dcb_outp_hasht(outp) == type) {
+                       if ((dcb_outp_hashm(outp) & mask) == mask)
+                               break;
+               }
+       }
+       return dcb;
+}
+
 int
 dcb_outp_foreach(struct nouveau_bios *bios, void *data,
                 int (*exec)(struct nouveau_bios *, void *, int, u16))
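
The new dcb_outp_match() above keys on a (type, mask) pair: type is the raw
DCB output type, and mask is tested bitwise against the entry's
(heads << 8) | (link << 6) | or hash. A usage sketch (illustrative only,
modelled on the init_dp_condition caller further down):

	struct dcb_output outp;
	u8 ver, len;
	/* first DP output on OR0 whose link mask includes link A */
	u16 dcb = dcb_outp_match(bios, DCB_OUTPUT_DP,
				 (1 << 0) /* or */ | (1 << 6) /* link */,
				 &ver, &len, &outp);
	if (dcb) {
		/* outp now holds the parsed entry for that encoder */
	}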
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 0000000..7f16e52
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/bit.h>
+#include <subdev/bios/disp.h>
+
+u16
+nvbios_disp_table(struct nouveau_bios *bios,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
+{
+       struct bit_entry U;
+
+       if (!bit_entry(bios, 'U', &U)) {
+               if (U.version == 1) {
+                       u16 data = nv_ro16(bios, U.offset);
+                       if (data) {
+                               *ver = nv_ro08(bios, data + 0x00);
+                               switch (*ver) {
+                               case 0x20:
+                               case 0x21:
+                                       *hdr = nv_ro08(bios, data + 0x01);
+                                       *len = nv_ro08(bios, data + 0x02);
+                                       *cnt = nv_ro08(bios, data + 0x03);
+                                       *sub = nv_ro08(bios, data + 0x04);
+                                       return data;
+                               default:
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       return 0x0000;
+}
+
+u16
+nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *len, u8 *sub)
+{
+       u8  hdr, cnt;
+       u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
+       if (data && idx < cnt)
+               return data + hdr + (idx * *len);
+       *ver = 0x00;
+       return 0x0000;
+}
+
+u16
+nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *len, u8 *sub,
+                 struct nvbios_disp *info)
+{
+       u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
+       if (data && *len >= 2) {
+               info->data = nv_ro16(bios, data + 0);
+               return data;
+       }
+       return 0x0000;
+}
+
+u16
+nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       struct nvbios_disp info;
+       u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
+       if (data) {
+               *cnt = nv_ro08(bios, info.data + 0x05);
+               *len = 0x06;
+               data = info.data;
+       }
+       return data;
+}
+
+u16
+nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_outp *info)
+{
+       u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
+       if (data && *hdr >= 0x0a) {
+               info->type      = nv_ro16(bios, data + 0x00);
+               info->mask      = nv_ro32(bios, data + 0x02);
+               if (*ver <= 0x20) /* match any link */
+                       info->mask |= 0x00c0;
+               info->script[0] = nv_ro16(bios, data + 0x06);
+               info->script[1] = nv_ro16(bios, data + 0x08);
+               info->script[2] = 0x0000;
+               if (*hdr >= 0x0c)
+                       info->script[2] = nv_ro16(bios, data + 0x0a);
+               return data;
+       }
+       return 0x0000;
+}
+
+u16
+nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_outp *info)
+{
+       u16 data, idx = 0;
+       while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+               if (data && info->type == type) {
+                       if ((info->mask & mask) == mask)
+                               break;
+               }
+       }
+       return data;
+}
+
+u16
+nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       if (idx < *cnt)
+               return outp + *hdr + (idx * *len);
+       return 0x0000;
+}
+
+u16
+nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_ocfg *info)
+{
+       u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+       if (data) {
+               info->match     = nv_ro16(bios, data + 0x00);
+               info->clkcmp[0] = nv_ro16(bios, data + 0x02);
+               info->clkcmp[1] = nv_ro16(bios, data + 0x04);
+       }
+       return data;
+}
+
+u16
+nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
+                 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                 struct nvbios_ocfg *info)
+{
+       u16 data, idx = 0;
+       while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
+               if (info->match == type)
+                       break;
+       }
+       return data;
+}
+
+u16
+nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
+{
+       while (cmp) {
+               if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
+                       return  nv_ro16(bios, cmp + 0x02);
+               cmp += 0x04;
+       }
+       return 0x0000;
+}
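
Of the helpers added above, nvbios_oclk_match() is the only non-obvious one:
it walks a list of (threshold, script) u16 pairs, returning the script offset
of the first entry whose threshold (stored in units of 10 kHz) is at or below
the requested clock. Worked example (numbers illustrative only): for a
165 MHz pixel clock, khz = 165000, so khz / 10 = 16500; an entry storing
16500 at +0x00 matches, and its script offset at +0x02 is returned.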
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3..663853b 100644
 
 #include "subdev/bios.h"
 #include "subdev/bios/bit.h"
-#include "subdev/bios/dcb.h"
 #include "subdev/bios/dp.h"
 
-u16
-dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+static u16
+nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
-       struct bit_entry bit_d;
+       struct bit_entry d;
 
-       if (!bit_entry(bios, 'd', &bit_d)) {
-               if (bit_d.version == 1) {
-                       u16 data = nv_ro16(bios, bit_d.offset);
+       if (!bit_entry(bios, 'd', &d)) {
+               if (d.version == 1 && d.length >= 2) {
+                       u16 data = nv_ro16(bios, d.offset);
                        if (data) {
-                               *ver = nv_ro08(bios, data + 0);
-                               *hdr = nv_ro08(bios, data + 1);
-                               *len = nv_ro08(bios, data + 2);
-                               *cnt = nv_ro08(bios, data + 3);
-                               return data;
+                               *ver = nv_ro08(bios, data + 0x00);
+                               switch (*ver) {
+                               case 0x21:
+                               case 0x30:
+                               case 0x40:
+                                       *hdr = nv_ro08(bios, data + 0x01);
+                                       *len = nv_ro08(bios, data + 0x02);
+                                       *cnt = nv_ro08(bios, data + 0x03);
+                                       return data;
+                               default:
+                                       break;
+                               }
                        }
                }
        }
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
        return 0x0000;
 }
 
+static u16
+nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
+       if (data && idx < *cnt) {
+               u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
+               switch (*ver * !!outp) {
+               case 0x21:
+               case 0x30:
+                       *hdr = nv_ro08(bios, data + 0x04);
+                       *len = nv_ro08(bios, data + 0x05);
+                       *cnt = nv_ro08(bios, outp + 0x04);
+                       break;
+               case 0x40:
+                       *hdr = nv_ro08(bios, data + 0x04);
+                       *cnt = 0;
+                       *len = 0;
+                       break;
+               default:
+                       break;
+               }
+               return outp;
+       }
+       *ver = 0x00;
+       return 0x0000;
+}
+
 u16
-dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
+nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpout *info)
 {
-       u8  hdr, cnt;
-       u16 table = dp_table(bios, ver, &hdr, &cnt, len);
-       if (table && idx < cnt)
-               return nv_ro16(bios, table + hdr + (idx * *len));
-       return 0xffff;
+       u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
+       if (data && *ver) {
+               info->type = nv_ro16(bios, data + 0x00);
+               info->mask = nv_ro16(bios, data + 0x02);
+               switch (*ver) {
+               case 0x21:
+               case 0x30:
+                       info->flags     = nv_ro08(bios, data + 0x05);
+                       info->script[0] = nv_ro16(bios, data + 0x06);
+                       info->script[1] = nv_ro16(bios, data + 0x08);
+                       info->lnkcmp    = nv_ro16(bios, data + 0x0a);
+                       info->script[2] = nv_ro16(bios, data + 0x0c);
+                       info->script[3] = nv_ro16(bios, data + 0x0e);
+                       info->script[4] = nv_ro16(bios, data + 0x10);
+                       break;
+               case 0x40:
+                       info->flags     = nv_ro08(bios, data + 0x04);
+                       info->script[0] = nv_ro16(bios, data + 0x05);
+                       info->script[1] = nv_ro16(bios, data + 0x07);
+                       info->lnkcmp    = nv_ro16(bios, data + 0x09);
+                       info->script[2] = nv_ro16(bios, data + 0x0b);
+                       info->script[3] = nv_ro16(bios, data + 0x0d);
+                       info->script[4] = nv_ro16(bios, data + 0x0f);
+                       break;
+               default:
+                       data = 0x0000;
+                       break;
+               }
+       }
+       return data;
 }
 
 u16
-dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
-             u8 *ver, u8 *len)
+nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpout *info)
 {
-       u8  idx = 0;
-       u16 data;
-       while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
-               if (data) {
-                       u32 hash = nv_ro32(bios, data);
-                       if (dcb_hash_match(outp, hash))
-                               return data;
+       u16 data, idx = 0;
+       while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
+               if (data && info->type == type) {
+                       if ((info->mask & mask) == mask)
+                               break;
                }
        }
+       return data;
+}
+
+static u16
+nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
+{
+       if (*ver >= 0x40) {
+               outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
+               *hdr = *hdr + (*len * *cnt);
+               *len = nv_ro08(bios, outp + 0x06);
+               *cnt = nv_ro08(bios, outp + 0x07);
+       }
+
+       if (idx < *cnt)
+               return outp + *hdr + (idx * *len);
+
        return 0x0000;
 }
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *info)
+{
+       u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
+       if (data) {
+               switch (*ver) {
+               case 0x21:
+                       info->drv = nv_ro08(bios, data + 0x02);
+                       info->pre = nv_ro08(bios, data + 0x03);
+                       info->unk = nv_ro08(bios, data + 0x04);
+                       break;
+               case 0x30:
+               case 0x40:
+                       info->drv = nv_ro08(bios, data + 0x01);
+                       info->pre = nv_ro08(bios, data + 0x02);
+                       info->unk = nv_ro08(bios, data + 0x03);
+                       break;
+               default:
+                       data = 0x0000;
+                       break;
+               }
+       }
+       return data;
+}
+
+u16
+nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
+                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+                  struct nvbios_dpcfg *info)
+{
+       u8 idx = 0xff;
+       u16 data;
+
+       if (*ver >= 0x30) {
+               const u8 vsoff[] = { 0, 4, 7, 9 };
+               idx = (un * 10) + vsoff[vs] + pe;
+       } else {
+               while ((data = nvbios_dpcfg_entry(bios, outp, ++idx,
+                                                 ver, hdr, cnt, len))) {
+                       if (nv_ro08(bios, data + 0x00) == vs &&
+                           nv_ro08(bios, data + 0x01) == pe)
+                               break;
+               }
+       }
+
+       return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
+}
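
For the 3.0/4.0 tables, nvbios_dpcfg_match() above indexes the config table
directly instead of scanning: idx = (un * 10) + vsoff[vs] + pe, where
vsoff[] = { 0, 4, 7, 9 } counts how many (vs, pe) combinations precede each
voltage-swing level (4, 3, 2 and 1 pre-emphasis settings for vs 0..3, ten
entries per "un" group). Worked example (values chosen for illustration):
un = 0, vs = 2, pe = 1 gives idx = 0 + 7 + 1 = 8, i.e. the ninth entry.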
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e5..c90d4aa 100644
@@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
        }
 
        /* DCB 2.2, fixed TVDAC GPIO data */
-       if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
-               if (func == DCB_GPIO_TVDAC0) {
+       if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
+               if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
                        u8 conf = nv_ro08(bios, entry - 5);
                        u8 addr = nv_ro08(bios, entry - 4);
                        if (conf & 0x01) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32..ae168bb 100644
@@ -743,9 +743,10 @@ static void
 init_dp_condition(struct nvbios_init *init)
 {
        struct nouveau_bios *bios = init->bios;
+       struct nvbios_dpout info;
        u8  cond = nv_ro08(bios, init->offset + 1);
        u8  unkn = nv_ro08(bios, init->offset + 2);
-       u8  ver, len;
+       u8  ver, hdr, cnt, len;
        u16 data;
 
        trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +760,12 @@ init_dp_condition(struct nvbios_init *init)
        case 1:
        case 2:
                if ( init->outp &&
-                   (data = dp_outp_match(bios, init->outp, &ver, &len))) {
-                       if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
-                               init_exec_set(init, false);
-                       if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
+                   (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
+                                              (init->outp->or << 0) |
+                                              (init->outp->sorconf.link << 6),
+                                              &ver, &hdr, &cnt, &len, &info)))
+               {
+                       if (!(info.flags & cond))
                                init_exec_set(init, false);
                        break;
                }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a464..f8a7ed4 100644
@@ -25,7 +25,6 @@
 #include <core/object.h>
 #include <core/device.h>
 #include <core/client.h>
-#include <core/device.h>
 #include <core/option.h>
 
 #include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
 
 static const u64 disable_map[] = {
        [NVDEV_SUBDEV_VBIOS]    = NV_DEVICE_DISABLE_VBIOS,
+       [NVDEV_SUBDEV_DEVINIT]  = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_GPIO]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_I2C]      = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_DEVINIT]  = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_CLOCK]    = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_MXM]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_MC]       = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_TIMER]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_FB]       = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_VM]       = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_LTCG]     = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_IBUS]     = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_INSTMEM]  = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_SUBDEV_VM]       = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_BAR]      = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_VOLT]     = NV_DEVICE_DISABLE_CORE,
-       [NVDEV_SUBDEV_CLOCK]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_SUBDEV_THERM]    = NV_DEVICE_DISABLE_CORE,
        [NVDEV_ENGINE_DMAOBJ]   = NV_DEVICE_DISABLE_CORE,
+       [NVDEV_ENGINE_FIFO]     = NV_DEVICE_DISABLE_FIFO,
+       [NVDEV_ENGINE_SW]       = NV_DEVICE_DISABLE_FIFO,
        [NVDEV_ENGINE_GR]       = NV_DEVICE_DISABLE_GRAPH,
        [NVDEV_ENGINE_MPEG]     = NV_DEVICE_DISABLE_MPEG,
        [NVDEV_ENGINE_ME]       = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
        [NVDEV_ENGINE_COPY0]    = NV_DEVICE_DISABLE_COPY0,
        [NVDEV_ENGINE_COPY1]    = NV_DEVICE_DISABLE_COPY1,
        [NVDEV_ENGINE_UNK1C1]   = NV_DEVICE_DISABLE_UNK1C1,
-       [NVDEV_ENGINE_FIFO]     = NV_DEVICE_DISABLE_FIFO,
+       [NVDEV_ENGINE_VENC]     = NV_DEVICE_DISABLE_VENC,
        [NVDEV_ENGINE_DISP]     = NV_DEVICE_DISABLE_DISP,
        [NVDEV_SUBDEV_NR]       = 0,
 };
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 
                /* determine frequency of timing crystal */
                if ( device->chipset < 0x17 ||
-                   (device->chipset >= 0x20 && device->chipset <= 0x25))
+                   (device->chipset >= 0x20 && device->chipset < 0x25))
                        strap &= 0x00000040;
                else
                        strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
 }
 
 static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
 {
        return nv_rd08(object->engine, addr);
 }
 
 static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
 {
        return nv_rd16(object->engine, addr);
 }
 
 static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
 {
        return nv_rd32(object->engine, addr);
 }
 
 static void
-nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
        nv_wr08(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
        nv_wr16(object->engine, addr, data);
 }
 
 static void
-nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        nv_wr32(object->engine, addr, data);
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accf..9c40b0f 100644
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7..74f88f4 100644
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe..0ac1b2c 100644
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv04_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadc..41d5968 100644
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv41_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
                device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
                device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
-               device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
+               device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nv44_vmmgr_oclass;
                device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc..6ccfd85 100644
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x86:
                device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x92:
                device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv84_disp_oclass;
                break;
        case 0x94:
                device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0x96:
                device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0x98:
                device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xa0:
                device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv84_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva0_disp_oclass;
                break;
        case 0xaa:
                device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xac:
                device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_CRYPT  ] = &nv98_crypt_oclass;
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nv94_disp_oclass;
                break;
        case 0xa3:
                device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xa5:
                device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xa8:
                device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xaf:
                device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
                device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nva3_copy_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        default:
                nv_fatal(device, "unknown Tesla chipset\n");
index 6697f0f..f046168 100644 (file)
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc4:
                device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc3:
                device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xce:
                device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xcf:
                device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc1:
                device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xc8:
                device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nv50_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nva3_disp_oclass;
                break;
        case 0xd9:
                device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nvc0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nvc0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_VP     ] = &nv84_vp_oclass;
-               device->oclass[NVDEV_ENGINE_BSP    ] = &nv84_bsp_oclass;
-               device->oclass[NVDEV_ENGINE_PPP    ] = &nv98_ppp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nvc0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
                break;
index 4a280b7..9b7881e 100644 (file)
@@ -45,6 +45,9 @@
 #include <engine/graph.h>
 #include <engine/disp.h>
 #include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
        case 0xe7:
                device->cname = "GK107";
@@ -92,13 +98,16 @@ nve0_identify(struct nouveau_device *device)
                device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
                device->oclass[NVDEV_SUBDEV_VM     ] = &nvc0_vmmgr_oclass;
                device->oclass[NVDEV_SUBDEV_BAR    ] = &nvc0_bar_oclass;
-               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+               device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
                device->oclass[NVDEV_ENGINE_FIFO   ] = &nve0_fifo_oclass;
                device->oclass[NVDEV_ENGINE_SW     ] = &nvc0_software_oclass;
                device->oclass[NVDEV_ENGINE_GR     ] = &nve0_graph_oclass;
-               device->oclass[NVDEV_ENGINE_DISP   ] = &nvd0_disp_oclass;
+               device->oclass[NVDEV_ENGINE_DISP   ] = &nve0_disp_oclass;
                device->oclass[NVDEV_ENGINE_COPY0  ] = &nve0_copy0_oclass;
                device->oclass[NVDEV_ENGINE_COPY1  ] = &nve0_copy1_oclass;
+               device->oclass[NVDEV_ENGINE_BSP    ] = &nve0_bsp_oclass;
+               device->oclass[NVDEV_ENGINE_VP     ] = &nve0_vp_oclass;
+               device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                break;
        default:
                nv_fatal(device, "unknown Kepler chipset\n");
index 61becfa..ae7249b 100644 (file)
  * Authors: Ben Skeggs
  */
 
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
 #include <subdev/devinit.h>
 #include <subdev/vga.h>
 
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
 static int
 nv50_devinit_init(struct nouveau_object *object)
 {
+       struct nouveau_bios *bios = nouveau_bios(object);
        struct nv50_devinit_priv *priv = (void *)object;
+       struct nvbios_outp info;
+       struct dcb_output outp;
+       u8  ver = 0xff, hdr, cnt, len;
+       int ret, i = 0;
 
        if (!priv->base.post) {
                if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
                }
        }
 
-       return nouveau_devinit_init(&priv->base);
+       ret = nouveau_devinit_init(&priv->base);
+       if (ret)
+               return ret;
+
+       /* if we ran the init tables, execute first script pointer for each
+        * display table output entry that has a matching dcb entry.
+        */
+       while (priv->base.post && ver) {
+               u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+               if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+                       struct nvbios_init init = {
+                               .subdev = nv_subdev(priv),
+                               .bios = bios,
+                               .offset = info.script[0],
+                               .outp = &outp,
+                               .crtc = -1,
+                               .execute = 1,
+                       };
+
+                       nvbios_exec(&init);
+               }
+       };
+
+       return 0;
 }
 
 static int
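
Editor's note: the new tail of nv50_devinit_init() above walks the VBIOS display table until the parser reports version 0, matching each output entry against the DCB and executing its first init script via nvbios_exec(). A standalone sketch of that parse-until-sentinel loop, with a stub parser and hypothetical names in place of nvbios_outp_parse():

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned short u16;

    /* Stub standing in for the table parser: returns an entry offset,
     * and clears *ver once the index runs past the end of the table. */
    static u16 outp_parse(int idx, u8 *ver)
    {
            static const u16 offsets[] = { 0x1234, 0x1250 };
            if (idx < 2) { *ver = 0x20; return offsets[idx]; }
            *ver = 0;
            return 0;
    }

    int main(void)
    {
            u8 ver = 0xff;
            int i = 0;

            while (ver) {
                    u16 data = outp_parse(i++, &ver);
                    if (data)   /* matched entry: would exec its script */
                            printf("exec init script at 0x%04x\n", data);
            }
            return 0;
    }
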
index f0086de..d6d1600 100644 (file)
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 }
 
 int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
 {
-       int ret, i;
+       static const char *name[] = {
+               [NV_MEM_TYPE_UNKNOWN] = "unknown",
+               [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+               [NV_MEM_TYPE_SGRAM  ] = "SGRAM",
+               [NV_MEM_TYPE_SDRAM  ] = "SDRAM",
+               [NV_MEM_TYPE_DDR1   ] = "DDR1",
+               [NV_MEM_TYPE_DDR2   ] = "DDR2",
+               [NV_MEM_TYPE_DDR3   ] = "DDR3",
+               [NV_MEM_TYPE_GDDR2  ] = "GDDR2",
+               [NV_MEM_TYPE_GDDR3  ] = "GDDR3",
+               [NV_MEM_TYPE_GDDR4  ] = "GDDR4",
+               [NV_MEM_TYPE_GDDR5  ] = "GDDR5",
+       };
+       int ret, tags;
 
-       ret = nouveau_subdev_init(&pfb->base);
-       if (ret)
-               return ret;
+       tags = pfb->ram.init(pfb);
+       if (tags < 0 || !pfb->ram.size) {
+               nv_fatal(pfb, "error detecting memory configuration!!\n");
+               return (tags < 0) ? tags : -ERANGE;
+       }
 
-       for (i = 0; i < pfb->tile.regions; i++)
-               pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+       if (!nouveau_mm_initialised(&pfb->vram)) {
+               ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+               if (ret)
+                       return ret;
+       }
 
-       return 0;
-}
+       if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+               ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+               if (ret)
+                       return ret;
+       }
 
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
-       struct nouveau_fb *pfb = (void *)object;
-       return nouveau_fb_init(pfb);
+       nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+       nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+       nv_info(pfb, "   ZCOMP: %d tags\n", tags);
+       return 0;
 }
 
 void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
 
        for (i = 0; i < pfb->tile.regions; i++)
                pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
-       if (pfb->tags.block_size)
-               nouveau_mm_fini(&pfb->tags);
-
-       if (pfb->vram.block_size)
-               nouveau_mm_fini(&pfb->vram);
+       nouveau_mm_fini(&pfb->tags);
+       nouveau_mm_fini(&pfb->vram);
 
        nouveau_subdev_destroy(&pfb->base);
 }
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
        struct nouveau_fb *pfb = (void *)object;
        nouveau_fb_destroy(pfb);
 }
-
 int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
 {
-       static const char *name[] = {
-               [NV_MEM_TYPE_UNKNOWN] = "unknown",
-               [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-               [NV_MEM_TYPE_SGRAM  ] = "SGRAM",
-               [NV_MEM_TYPE_SDRAM  ] = "SDRAM",
-               [NV_MEM_TYPE_DDR1   ] = "DDR1",
-               [NV_MEM_TYPE_DDR2   ] = "DDR2",
-               [NV_MEM_TYPE_DDR3   ] = "DDR3",
-               [NV_MEM_TYPE_GDDR2  ] = "GDDR2",
-               [NV_MEM_TYPE_GDDR3  ] = "GDDR3",
-               [NV_MEM_TYPE_GDDR4  ] = "GDDR4",
-               [NV_MEM_TYPE_GDDR5  ] = "GDDR5",
-       };
+       int ret, i;
 
-       if (pfb->ram.size == 0) {
-               nv_fatal(pfb, "no vram detected!!\n");
-               return -ERANGE;
-       }
+       ret = nouveau_subdev_init(&pfb->base);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < pfb->tile.regions; i++)
+               pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
 
-       nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
-       nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
        return 0;
 }
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+       struct nouveau_fb *pfb = (void *)object;
+       return nouveau_fb_init(pfb);
+}
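
Editor's note: the base.c reshuffle above splits detection from initialisation. Each constructor now registers a ram.init hook; nouveau_fb_preinit() invokes it (a negative return is an error, a non-negative return is the compression tag count), sanity-checks ram.size, and seeds the vram/tags allocators once at create time, while nouveau_fb_init() keeps only the tile programming that must rerun on resume. A compilable userspace model of that contract, with hypothetical names:

    #include <stdio.h>
    #include <errno.h>

    typedef unsigned u32;

    struct fb {
            u32 size;                      /* bytes of VRAM detected */
            int (*ram_init)(struct fb *);  /* returns tag count or -errno */
    };

    static int fake_ram_init(struct fb *fb)
    {
            fb->size = 32u << 20;          /* pretend we found 32 MiB */
            return 64 * 1024;              /* and 64K compression tags */
    }

    static int fb_preinit(struct fb *fb)
    {
            int tags = fb->ram_init(fb);
            if (tags < 0 || !fb->size)
                    return tags < 0 ? tags : -ERANGE;
            printf("RAM size: %u MiB, %d tags\n", fb->size >> 20, tags);
            return 0;  /* real code would now init the vram/tags allocators */
    }

    int main(void)
    {
            struct fb fb = { .ram_init = fake_ram_init };
            return fb_preinit(&fb);
    }
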
index eb06836..6e369f8 100644 (file)
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 }
 
 static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+       if (boot0 & 0x00000100) {
+               pfb->ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
+               pfb->ram.size *= 1024 * 1024;
+       } else {
+               switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+                       pfb->ram.size = 32 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+                       pfb->ram.size = 16 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+                       pfb->ram.size = 8 * 1024 * 1024;
+                       break;
+               case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+                       pfb->ram.size = 4 * 1024 * 1024;
+                       break;
+               }
+       }
+
+       if ((boot0 & 0x00000038) <= 0x10)
+               pfb->ram.type = NV_MEM_TYPE_SGRAM;
+       else
+               pfb->ram.type = NV_MEM_TYPE_SDRAM;
+       return 0;
+}
+
+static int
 nv04_fb_init(struct nouveau_object *object)
 {
        struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_object **pobject)
 {
        struct nv04_fb_priv *priv;
-       u32 boot0;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
-       if (boot0 & 0x00000100) {
-               priv->base.ram.size  = ((boot0 >> 12) & 0xf) * 2 + 2;
-               priv->base.ram.size *= 1024 * 1024;
-       } else {
-               switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-                       priv->base.ram.size = 32 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-                       priv->base.ram.size = 16 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-                       priv->base.ram.size = 8 * 1024 * 1024;
-                       break;
-               case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-                       priv->base.ram.size = 4 * 1024 * 1024;
-                       break;
-               }
-       }
-
-       if ((boot0 & 0x00000038) <= 0x10)
-               priv->base.ram.type = NV_MEM_TYPE_SGRAM;
-       else
-               priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
-       return nouveau_fb_created(&priv->base);
+       priv->base.ram.init = nv04_fb_vram_init;
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
index f037a42..edbbe26 100644 (file)
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
        struct nouveau_fb base;
 };
 
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 cfg0 = nv_rd32(pfb, 0x100200);
+       if (cfg0 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+       else
+               pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       return 0;
+}
+
+void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
        tile->pitch = pitch;
 }
 
-static void
+void
 nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        tile->addr  = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
        nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
        nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
        nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
 static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv10_fb_priv *priv;
        int ret;
 
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       if (device->chipset == 0x1a ||  device->chipset == 0x1f) {
-               struct pci_dev *bridge;
-               u32 mem, mib;
-
-               bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
-               if (!bridge) {
-                       nv_fatal(device, "no bridge device\n");
-                       return 0;
-               }
-
-               if (device->chipset == 0x1a) {
-                       pci_read_config_dword(bridge, 0x7c, &mem);
-                       mib = ((mem >> 6) & 31) + 1;
-               } else {
-                       pci_read_config_dword(bridge, 0x84, &mem);
-                       mib = ((mem >> 4) & 127) + 1;
-               }
-
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-               priv->base.ram.size = mib * 1024 * 1024;
-       } else {
-               u32 cfg0 = nv_rd32(priv, 0x100200);
-               if (cfg0 & 0x00000001)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR1;
-               else
-                       priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-               priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-       }
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv10_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv10_fb_tile_init;
        priv->base.tile.fini = nv10_fb_tile_fini;
        priv->base.tile.prog = nv10_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
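
Editor's note: nv10_fb_tile_prog() (and nv20's variant later in this series) gains a trailing nv_rd32() of the register it just wrote. Reading back the last write is the usual way to flush posted MMIO writes, so the tile configuration has actually landed before the caller proceeds; the diff does not state the rationale, so treat this as an inference. An illustrative userspace model with volatile accesses standing in for nv_wr32()/nv_rd32(), and register offsets truncated to fit a small array:

    #include <stdint.h>
    #include <stdio.h>

    static void tile_prog(volatile uint32_t *regs, int i,
                          uint32_t limit, uint32_t pitch, uint32_t addr)
    {
            regs[0x244/4 + i*4] = limit;
            regs[0x248/4 + i*4] = pitch;
            regs[0x240/4 + i*4] = addr;
            (void)regs[0x240/4 + i*4];  /* read back: flushes posted writes */
    }

    int main(void)
    {
            static uint32_t bar[0x400];  /* fake register BAR */
            tile_prog(bar, 0, 0xfffff, 0x100, 0x1);
            printf("tile0 addr=%#x\n", bar[0x240/4]);
            return 0;
    }
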
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644 (file)
index 0000000..4836684
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct pci_dev *bridge;
+       u32 mem, mib;
+
+       bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+       if (!bridge) {
+               nv_fatal(pfb, "no bridge device\n");
+               return -ENODEV;
+       }
+
+       if (nv_device(pfb)->chipset == 0x1a) {
+               pci_read_config_dword(bridge, 0x7c, &mem);
+               mib = ((mem >> 6) & 31) + 1;
+       } else {
+               pci_read_config_dword(bridge, 0x84, &mem);
+               mib = ((mem >> 4) & 127) + 1;
+       }
+
+       pfb->ram.type = NV_MEM_TYPE_STOLEN;
+       pfb->ram.size = mib * 1024 * 1024;
+       return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv1a_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv1a_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv10_fb_tile_init;
+       priv->base.tile.fini = nv10_fb_tile_fini;
+       priv->base.tile.prog = nv10_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x1a),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv1a_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = _nouveau_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 4b3578f..5d14612 100644 (file)
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
        struct nouveau_fb base;
 };
 
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+       switch (pbus1218 & 0x00000300) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+       case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+       }
+       pfb->ram.size  = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+       return nv_rd32(pfb, 0x100320);
+}
+
+void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
-       struct nouveau_device *device = nv_device(pfb);
-       int bpp = (flags & 2) ? 32 : 16;
-
        tile->addr  = 0x00000001 | addr;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;
-
-       /* Allocate some of the on-die tag memory, used to store Z
-        * compression meta-data (most likely just a bitmap determining
-        * if a given tile is compressed or not).
-        */
-       size /= 256;
        if (flags & 4) {
-               if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
-                       /* Enable Z compression */
-                       tile->zcomp = tile->tag->offset;
-                       if (device->chipset >= 0x25) {
-                               if (bpp == 16)
-                                       tile->zcomp |= 0x00100000;
-                               else
-                                       tile->zcomp |= 0x00200000;
-                       } else {
-                               tile->zcomp |= 0x80000000;
-                               if (bpp != 16)
-                                       tile->zcomp |= 0x04000000;
-                       }
-               }
-
+               pfb->tile.comp(pfb, i, size, flags, tile);
                tile->addr |= 2;
        }
 }
 
 static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+               else              tile->zcomp = 0x04000000; /* Z24S8 */
+               tile->zcomp |= tile->tag->offset;
+               tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x08000000;
+#endif
+       }
+}
+
+void
 nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        tile->addr  = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
        nouveau_mm_free(&pfb->tags, &tile->tag);
 }
 
-static void
+void
 nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
        nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
        nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
        nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100240 + (i * 0x10));
        nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv20_fb_priv *priv;
-       u32 pbus1218;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       pbus1218 = nv_rd32(priv, 0x001218);
-       switch (pbus1218 & 0x00000300) {
-       case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-       case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-       }
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
-       if (device->chipset >= 0x25)
-               ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
-       else
-               ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
-       if (ret)
-               return ret;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv20_fb_tile_init;
+       priv->base.tile.comp = nv20_fb_tile_comp;
        priv->base.tile.fini = nv20_fb_tile_fini;
        priv->base.tile.prog = nv20_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
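
Editor's note: every *_fb_tile_comp() variant in this series sizes its ZCOMP allocation the same way: one tag per 0x40-byte tile, divided across the memory partitions, rounded up to a 0x40 boundary. Worked example: a 1 MiB region on a 2-partition board gives tiles = 0x100000 / 0x40 = 0x4000 and tags = round_up(0x4000 / 2, 0x40) = 0x2000. A self-contained check of that arithmetic, using the kernel's power-of-two round_up definition:

    #include <stdio.h>

    typedef unsigned u32;
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define round_up(x, y)      ((((x) - 1) | ((y) - 1)) + 1) /* y: pow2 */

    int main(void)
    {
            u32 size  = 0x100000, parts = 2;            /* 1 MiB, 2 parts */
            u32 tiles = DIV_ROUND_UP(size, 0x40);       /* 0x4000 tiles  */
            u32 tags  = round_up(tiles / parts, 0x40);  /* 0x2000 tags   */
            printf("tiles=%#x tags=%#x\n", tiles, tags);
            return 0;
    }
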
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644 (file)
index 0000000..0042ace
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+               else              tile->zcomp = 0x00200000; /* Z24S8 */
+               tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x01000000;
+#endif
+       }
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv25_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv20_fb_tile_init;
+       priv->base.tile.comp = nv25_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x25),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv25_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = _nouveau_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index cba67bc..a7ba0d0 100644 (file)
@@ -34,17 +34,36 @@ void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
                  u32 flags, struct nouveau_fb_tile *tile)
 {
-       tile->addr = addr | 1;
+       /* for performance, select alternate bank offset for zeta */
+       if (!(flags & 4)) {
+               tile->addr = (0 << 4);
+       } else {
+               if (pfb->tile.comp) /* z compression */
+                       pfb->tile.comp(pfb, i, size, flags, tile);
+               tile->addr = (1 << 4);
+       }
+
+       tile->addr |= 0x00000001; /* enable */
+       tile->addr |= addr;
        tile->limit = max(1u, addr + size) - 1;
        tile->pitch = pitch;
 }
 
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
 {
-       tile->addr  = 0;
-       tile->limit = 0;
-       tile->pitch = 0;
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+               else           tile->zcomp |= 0x02000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x10000000;
+#endif
+       }
 }
 
 static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
        return x;
 }
 
-static int
+int
 nv30_fb_init(struct nouveau_object *object)
 {
        struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_object **pobject)
 {
        struct nv30_fb_priv *priv;
-       u32 pbus1218;
        int ret;
 
        ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       pbus1218 = nv_rd32(priv, 0x001218);
-       switch (pbus1218 & 0x00000300) {
-       case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-       case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-       }
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
        priv->base.tile.regions = 8;
        priv->base.tile.init = nv30_fb_tile_init;
-       priv->base.tile.fini = nv30_fb_tile_fini;
-       priv->base.tile.prog = nv10_fb_tile_prog;
-       return nouveau_fb_created(&priv->base);
+       priv->base.tile.comp = nv30_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644 (file)
index 0000000..092f6f4
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+               else           tile->zcomp |= 0x08000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x40000000;
+#endif
+       }
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv35_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv35_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x35),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv35_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv30_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644 (file)
index 0000000..797ab3b
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+       struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
+{
+       u32 tiles = DIV_ROUND_UP(size, 0x40);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
+       if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+               else           tile->zcomp |= 0x20000000; /* Z24S8 */
+               tile->zcomp |= ((tile->tag->offset           ) >> 6);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x80000000;
+#endif
+       }
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv36_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv20_fb_vram_init;
+       priv->base.tile.regions = 8;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv36_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x36),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv36_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv30_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
index 347a496..65e131b 100644 (file)
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
        struct nouveau_fb base;
 };
 
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
-       if ((device->chipset & 0xf0) == 0x60)
-               return 1;
-
-       return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
 {
-       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+       u32 pbus1218 = nv_rd32(pfb, 0x001218);
+       switch (pbus1218 & 0x00000300) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+       case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+       }
 
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
-       nv_wr32(priv, 0x100800, 0x00000001);
+       pfb->ram.size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
 }
 
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+                 struct nouveau_fb_tile *tile)
 {
-       nv_wr32(priv, 0x100850, 0x80000000);
-       nv_wr32(priv, 0x100800, 0x00000001);
+       u32 tiles = DIV_ROUND_UP(size, 0x80);
+       u32 tags  = round_up(tiles / pfb->ram.parts, 0x100);
+       if ( (flags & 2) &&
+           !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+               tile->zcomp  = 0x28000000; /* Z24S8_SPLIT_GRAD */
+               tile->zcomp |= ((tile->tag->offset           ) >> 8);
+               tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+               tile->zcomp |= 0x40000000;
+#endif
+       }
 }
 
 static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
        if (ret)
                return ret;
 
-       switch (nv_device(priv)->chipset) {
-       case 0x40:
-       case 0x45:
-               nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-               break;
-       default:
-               if (nv44_graph_class(nv_device(priv)))
-                       nv44_fb_init_gart(priv);
-               else
-                       nv40_fb_init_gart(priv);
-               break;
-       }
-
+       nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
        return 0;
 }
 
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
 {
-       struct nouveau_device *device = nv_device(parent);
        struct nv40_fb_priv *priv;
        int ret;
 
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       /* 0x001218 is actually present on a few other NV4X I looked at,
-        * and even contains sane values matching 0x100474.  From looking
-        * at various vbios images however, this isn't the case everywhere.
-        * So, I chose to use the same regs I've seen NVIDIA reading around
-        * the memory detection, hopefully that'll get us the right numbers
-        */
-       if (device->chipset == 0x40) {
-               u32 pbus1218 = nv_rd32(priv, 0x001218);
-               switch (pbus1218 & 0x00000300) {
-               case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-               case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-               case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-               case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-               }
-       } else
-       if (device->chipset == 0x49 || device->chipset == 0x4b) {
-               u32 pfb914 = nv_rd32(priv, 0x100914);
-               switch (pfb914 & 0x00000003) {
-               case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-               case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-               case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-               case 0x00000003: break;
-               }
-       } else
-       if (device->chipset != 0x4e) {
-               u32 pfb474 = nv_rd32(priv, 0x100474);
-               if (pfb474 & 0x00000004)
-                       priv->base.ram.type = NV_MEM_TYPE_GDDR3;
-               if (pfb474 & 0x00000002)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR2;
-               if (pfb474 & 0x00000001)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR1;
-       } else {
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-       }
-
-       priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
        priv->base.memtype_valid = nv04_fb_memtype_valid;
-       switch (device->chipset) {
-       case 0x40:
-       case 0x45:
-               priv->base.tile.regions = 8;
-               break;
-       case 0x46:
-       case 0x47:
-       case 0x49:
-       case 0x4b:
-       case 0x4c:
-               priv->base.tile.regions = 15;
-               break;
-       default:
-               priv->base.tile.regions = 12;
-               break;
-       }
+       priv->base.ram.init = nv40_fb_vram_init;
+       priv->base.tile.regions = 8;
        priv->base.tile.init = nv30_fb_tile_init;
-       priv->base.tile.fini = nv30_fb_tile_fini;
-       if (device->chipset == 0x40)
-               priv->base.tile.prog = nv10_fb_tile_prog;
-       else
-               priv->base.tile.prog = nv40_fb_tile_prog;
-
-       return nouveau_fb_created(&priv->base);
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv20_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
 }
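
A note on the zcomp word built by nv40_fb_tile_comp() above: it packs the first and last allocated tag line (in units of 256, hence the '>> 8') around the 0x28000000 Z24S8_SPLIT_GRAD format code, with 0x40000000 selecting big-endian layout. The standalone sketch below runs the same packing in plain user-space C; DIV_ROUND_UP/round_up are re-declared to mirror the kernel macros, and the region size, partition count and tag offset are invented:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
#define round_up(x, y)      ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
        uint32_t size  = 0x100000;                /* 1 MiB tile region   */
        uint32_t parts = 2;                       /* memory partitions   */
        uint32_t start = 0;                       /* allocated tag start */

        uint32_t tiles = DIV_ROUND_UP(size, 0x80);
        uint32_t tags  = round_up(tiles / parts, 0x100);
        uint32_t zcomp = 0x28000000;              /* Z24S8_SPLIT_GRAD    */

        zcomp |= (start >> 8);                    /* first tag line      */
        zcomp |= ((start + tags - 1) >> 8) << 13; /* last tag line       */

        printf("tags=0x%x zcomp=0x%08x\n", (unsigned)tags, (unsigned)zcomp);
        return 0;                          /* tags=0x1000 zcomp=0x2801e000 */
}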
 
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644 (file)
index 0000000..e9e5a08
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+       struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb474 = nv_rd32(pfb, 0x100474);
+       if (pfb474 & 0x00000004)
+               pfb->ram.type = NV_MEM_TYPE_GDDR3;
+       if (pfb474 & 0x00000002)
+               pfb->ram.type = NV_MEM_TYPE_DDR2;
+       if (pfb474 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+       pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100600 + (i * 0x10));
+       nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+       struct nv41_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x100800, 0x00000001);
+       return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv41_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv41_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x41),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv41_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
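
nv41_fb_vram_init() additionally reads the partition count from bits 0-1 of 0x100200 and, like the nv40 version, returns the contents of 0x100320. Judging by the tag setup removed from the old nv50 constructor further down in this diff (0x100320 fed straight into nouveau_mm_init(&priv->base.tags, ...)), a positive return from ram.init is presumably a compression tag count, now consumed centrally by nouveau_fb_preinit(). A small register-decode sketch with invented readouts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t pfb_100200 = 0x00000001;  /* hypothetical readout */
        uint32_t pfb_10020c = 0x10000000;  /* hypothetical readout */

        uint32_t parts = (pfb_100200 & 0x00000003) + 1;  /* 2 partitions */
        uint64_t bytes = pfb_10020c & 0xff000000;  /* 16 MiB granularity */

        printf("parts=%u size=%u MiB\n", (unsigned)parts,
               (unsigned)(bytes >> 20));                 /* 256 MiB */
        return 0;
}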
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644 (file)
index 0000000..ae89b50
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+       struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb474 = nv_rd32(pfb, 0x100474);
+       if (pfb474 & 0x00000004)
+               pfb->ram.type = NV_MEM_TYPE_GDDR3;
+       if (pfb474 & 0x00000002)
+               pfb->ram.type = NV_MEM_TYPE_DDR2;
+       if (pfb474 & 0x00000001)
+               pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+                 u32 flags, struct nouveau_fb_tile *tile)
+{
+       tile->addr  = 0x00000001; /* mode = vram */
+       tile->addr |= addr;
+       tile->limit = max(1u, addr + size) - 1;
+       tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+       nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+       nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+       nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+       nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+       struct nv44_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       nv_wr32(priv, 0x100850, 0x80000000);
+       nv_wr32(priv, 0x100800, 0x00000001);
+       return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv44_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv44_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv44_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x44),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv44_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644 (file)
index 0000000..589b93e
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv46_fb_priv {
+       struct nouveau_fb base;
+};
+
+void
+nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+                 u32 flags, struct nouveau_fb_tile *tile)
+{
+       /* for performance, select alternate bank offset for zeta */
+       if (!(flags & 4)) tile->addr = (0 << 3);
+       else              tile->addr = (1 << 3);
+
+       tile->addr |= 0x00000001; /* mode = vram */
+       tile->addr |= addr;
+       tile->limit = max(1u, addr + size) - 1;
+       tile->pitch = pitch;
+}
+
+static int
+nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv46_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv44_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv46_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv46_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x46),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv46_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
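
nv46_fb_tile_init() differs from the nv44 version only in bit 3 of the address word, the bank-select bit the patch comments as "alternate bank offset for zeta". A minimal sketch, mirroring the branch as written (that flags bit 2, 0x4, relates to zeta is taken from that comment and nothing more):

#include <stdio.h>
#include <stdint.h>

static uint32_t
nv46_tile_addr(uint32_t addr, uint32_t flags)
{
        uint32_t bank = (flags & 4) ? (1u << 3) : 0;  /* alternate bank */
        return bank | 0x00000001 /* mode = vram */ | addr;
}

int main(void)
{
        printf("0x%08x\n", (unsigned)nv46_tile_addr(0x00100000, 4));
        return 0;                                     /* 0x00100009 */
}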
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644 (file)
index 0000000..818bba3
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv47_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv47_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv41_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv47_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x47),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv47_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644 (file)
index 0000000..84a31af
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv49_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv49_fb_vram_init(struct nouveau_fb *pfb)
+{
+       u32 pfb914 = nv_rd32(pfb, 0x100914);
+
+       switch (pfb914 & 0x00000003) {
+       case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+       case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000003: break;
+       }
+
+       pfb->ram.size =   nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       return nv_rd32(pfb, 0x100320);
+}
+
+static int
+nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv49_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv49_fb_vram_init;
+       priv->base.tile.regions = 15;
+       priv->base.tile.init = nv30_fb_tile_init;
+       priv->base.tile.comp = nv40_fb_tile_comp;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv41_fb_tile_prog;
+
+       return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv49_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x49),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv49_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv41_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644 (file)
index 0000000..797fd55
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv4e_fb_priv {
+       struct nouveau_fb base;
+};
+
+static int
+nv4e_fb_vram_init(struct nouveau_fb *pfb)
+{
+       pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       pfb->ram.type = NV_MEM_TYPE_STOLEN;
+       return 0;
+}
+
+static int
+nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nv4e_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->base.memtype_valid = nv04_fb_memtype_valid;
+       priv->base.ram.init = nv4e_fb_vram_init;
+       priv->base.tile.regions = 12;
+       priv->base.tile.init = nv46_fb_tile_init;
+       priv->base.tile.fini = nv20_fb_tile_fini;
+       priv->base.tile.prog = nv44_fb_tile_prog;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv4e_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x4e),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv4e_fb_ctor,
+               .dtor = _nouveau_fb_dtor,
+               .init = nv44_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
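
nv4e is the IGP case: there is no dedicated VRAM, so the size read from 0x10020c describes memory stolen from the host, typed NV_MEM_TYPE_STOLEN, and ram.init returns 0. A pattern worth stating once for all the files above: the comp-capable parts (nv40/nv41/nv47/nv49) return nv_rd32(pfb, 0x100320) from ram.init and install a tile.comp hook, while nv44/nv46/nv4e return 0 and install none, which fits the reading of the return value as a compression tag count.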
index 5f57080..487cb8c 100644 (file)
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
        return types[(memtype & 0xff00) >> 8] != 0;
 }
 
+static u32
+nv50_fb_vram_rblock(struct nouveau_fb *pfb)
+{
+       int i, parts, colbits, rowbitsa, rowbitsb, banks;
+       u64 rowsize, predicted;
+       u32 r0, r4, rt, ru, rblock_size;
+
+       r0 = nv_rd32(pfb, 0x100200);
+       r4 = nv_rd32(pfb, 0x100204);
+       rt = nv_rd32(pfb, 0x100250);
+       ru = nv_rd32(pfb, 0x001540);
+       nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+       for (i = 0, parts = 0; i < 8; i++) {
+               if (ru & (0x00010000 << i))
+                       parts++;
+       }
+
+       colbits  =  (r4 & 0x0000f000) >> 12;
+       rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+       rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+       banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
+       rowsize = parts * banks * (1 << colbits) * 8;
+       predicted = rowsize << rowbitsa;
+       if (r0 & 0x00000004)
+               predicted += rowsize << rowbitsb;
+
+       if (predicted != pfb->ram.size) {
+               nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
+                       (u32)(pfb->ram.size >> 20));
+       }
+
+       rblock_size = rowsize;
+       if (rt & 1)
+               rblock_size *= 3;
+
+       nv_debug(pfb, "rblock %d bytes\n", rblock_size);
+       return rblock_size;
+}
+
+static int
+nv50_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct nouveau_device *device = nv_device(pfb);
+       struct nouveau_bios *bios = nouveau_bios(device);
+       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 size;
+       int ret;
+
+       pfb->ram.size = nv_rd32(pfb, 0x10020c);
+       pfb->ram.size = (pfb->ram.size & 0xffffff00) |
+                      ((pfb->ram.size & 0x000000ff) << 32);
+
+       size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+       switch (device->chipset) {
+       case 0xaa:
+       case 0xac:
+       case 0xaf: /* IGPs, no reordering, no real VRAM */
+               ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
+               if (ret)
+                       return ret;
+
+               pfb->ram.type   = NV_MEM_TYPE_STOLEN;
+               pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+               break;
+       default:
+               switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
+               case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+               case 1:
+                       if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
+                               pfb->ram.type = NV_MEM_TYPE_DDR3;
+                       else
+                               pfb->ram.type = NV_MEM_TYPE_DDR2;
+                       break;
+               case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+               case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
+               case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
+               default:
+                       break;
+               }
+
+               ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
+                                     nv50_fb_vram_rblock(pfb) >> 12);
+               if (ret)
+                       return ret;
+
+               pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
+               break;
+       }
+
+       return nv_rd32(pfb, 0x100320);
+}
+
 static int
 nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                 u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
        kfree(mem);
 }
 
-static u32
-nv50_vram_rblock(struct nv50_fb_priv *priv)
-{
-       int i, parts, colbits, rowbitsa, rowbitsb, banks;
-       u64 rowsize, predicted;
-       u32 r0, r4, rt, ru, rblock_size;
-
-       r0 = nv_rd32(priv, 0x100200);
-       r4 = nv_rd32(priv, 0x100204);
-       rt = nv_rd32(priv, 0x100250);
-       ru = nv_rd32(priv, 0x001540);
-       nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-       for (i = 0, parts = 0; i < 8; i++) {
-               if (ru & (0x00010000 << i))
-                       parts++;
-       }
-
-       colbits  =  (r4 & 0x0000f000) >> 12;
-       rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-       rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-       banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
-       rowsize = parts * banks * (1 << colbits) * 8;
-       predicted = rowsize << rowbitsa;
-       if (r0 & 0x00000004)
-               predicted += rowsize << rowbitsb;
-
-       if (predicted != priv->base.ram.size) {
-               nv_warn(priv, "memory controller reports %d MiB VRAM\n",
-                       (u32)(priv->base.ram.size >> 20));
-       }
-
-       rblock_size = rowsize;
-       if (rt & 1)
-               rblock_size *= 3;
-
-       nv_debug(priv, "rblock %d bytes\n", rblock_size);
-       return rblock_size;
-}
-
-static int
-nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-            struct nouveau_oclass *oclass, void *data, u32 size,
-            struct nouveau_object **pobject)
-{
-       struct nouveau_device *device = nv_device(parent);
-       struct nouveau_bios *bios = nouveau_bios(device);
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       struct nv50_fb_priv *priv;
-       u32 tags;
-       int ret;
-
-       ret = nouveau_fb_create(parent, engine, oclass, &priv);
-       *pobject = nv_object(priv);
-       if (ret)
-               return ret;
-
-       switch (nv_rd32(priv, 0x100714) & 0x00000007) {
-       case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-       case 1:
-               if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
-                       priv->base.ram.type = NV_MEM_TYPE_DDR3;
-               else
-                       priv->base.ram.type = NV_MEM_TYPE_DDR2;
-               break;
-       case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-       case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
-       case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
-       default:
-               break;
-       }
-
-       priv->base.ram.size = nv_rd32(priv, 0x10020c);
-       priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
-                            ((priv->base.ram.size & 0x000000ff) << 32);
-
-       tags = nv_rd32(priv, 0x100320);
-       ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
-       if (ret)
-               return ret;
-
-       nv_debug(priv, "%d compression tags\n", tags);
-
-       size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-       switch (device->chipset) {
-       case 0xaa:
-       case 0xac:
-       case 0xaf: /* IGPs, no reordering, no real VRAM */
-               ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
-               if (ret)
-                       return ret;
-
-               priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
-               priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-               break;
-       default:
-               ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
-                                     nv50_vram_rblock(priv) >> 12);
-               if (ret)
-                       return ret;
-
-               priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
-               break;
-       }
-
-       priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-       if (priv->r100c08_page) {
-               priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
-                                            0, PAGE_SIZE,
-                                            PCI_DMA_BIDIRECTIONAL);
-               if (pci_dma_mapping_error(device->pdev, priv->r100c08))
-                       nv_warn(priv, "failed 0x100c08 page map\n");
-       } else {
-               nv_warn(priv, "failed 0x100c08 page alloc\n");
-       }
-
-       priv->base.memtype_valid = nv50_fb_memtype_valid;
-       priv->base.ram.get = nv50_fb_vram_new;
-       priv->base.ram.put = nv50_fb_vram_del;
-       return nouveau_fb_created(&priv->base);
-}
-
-static void
-nv50_fb_dtor(struct nouveau_object *object)
-{
-       struct nouveau_device *device = nv_device(object);
-       struct nv50_fb_priv *priv = (void *)object;
-
-       if (priv->r100c08_page) {
-               pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-               __free_page(priv->r100c08_page);
-       }
-
-       nouveau_fb_destroy(&priv->base);
-}
-
-static int
-nv50_fb_init(struct nouveau_object *object)
-{
-       struct nouveau_device *device = nv_device(object);
-       struct nv50_fb_priv *priv = (void *)object;
-       int ret;
-
-       ret = nouveau_fb_init(&priv->base);
-       if (ret)
-               return ret;
-
-       /* Not a clue what this is exactly.  Without pointing it at a
-        * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-        * cause IOMMU "read from address 0" errors (rh#561267)
-        */
-       nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
-
-       /* This is needed to get meaningful information from 100c90
-        * on traps. No idea what these values mean exactly. */
-       switch (device->chipset) {
-       case 0x50:
-               nv_wr32(priv, 0x100c90, 0x000707ff);
-               break;
-       case 0xa3:
-       case 0xa5:
-       case 0xa8:
-               nv_wr32(priv, 0x100c90, 0x000d0fff);
-               break;
-       case 0xaf:
-               nv_wr32(priv, 0x100c90, 0x089d1fff);
-               break;
-       default:
-               nv_wr32(priv, 0x100c90, 0x001d07ff);
-               break;
-       }
-
-       return 0;
-}
-
-struct nouveau_oclass
-nv50_fb_oclass = {
-       .handle = NV_SUBDEV(FB, 0x50),
-       .ofuncs = &(struct nouveau_ofuncs) {
-               .ctor = nv50_fb_ctor,
-               .dtor = nv50_fb_dtor,
-               .init = nv50_fb_init,
-               .fini = _nouveau_fb_fini,
-       },
-};
-
 static const struct nouveau_enum vm_dispatch_subclients[] = {
        { 0x00000000, "GRCTX", NULL },
        { 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
        {}
 };
 
-void
-nv50_fb_trap(struct nouveau_fb *pfb, int display)
+static void
+nv50_fb_intr(struct nouveau_subdev *subdev)
 {
-       struct nouveau_device *device = nv_device(pfb);
-       struct nv50_fb_priv *priv = (void *)pfb;
+       struct nouveau_device *device = nv_device(subdev);
+       struct nv50_fb_priv *priv = (void *)subdev;
        const struct nouveau_enum *en, *cl;
        u32 trap[6], idx, chan;
        u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
        }
        nv_wr32(priv, 0x100c90, idx | 0x80000000);
 
-       if (!display)
-               return;
-
        /* decode status bits into something more useful */
        if (device->chipset  < 0xa3 ||
            device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
        else
                printk("0x%08x\n", st1);
 }
+
+static int
+nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+            struct nouveau_oclass *oclass, void *data, u32 size,
+            struct nouveau_object **pobject)
+{
+       struct nouveau_device *device = nv_device(parent);
+       struct nv50_fb_priv *priv;
+       int ret;
+
+       ret = nouveau_fb_create(parent, engine, oclass, &priv);
+       *pobject = nv_object(priv);
+       if (ret)
+               return ret;
+
+       priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (priv->r100c08_page) {
+               priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
+                                            0, PAGE_SIZE,
+                                            PCI_DMA_BIDIRECTIONAL);
+               if (pci_dma_mapping_error(device->pdev, priv->r100c08))
+                       nv_warn(priv, "failed 0x100c08 page map\n");
+       } else {
+               nv_warn(priv, "failed 0x100c08 page alloc\n");
+       }
+
+       priv->base.memtype_valid = nv50_fb_memtype_valid;
+       priv->base.ram.init = nv50_fb_vram_init;
+       priv->base.ram.get = nv50_fb_vram_new;
+       priv->base.ram.put = nv50_fb_vram_del;
+       nv_subdev(priv)->intr = nv50_fb_intr;
+       return nouveau_fb_preinit(&priv->base);
+}
+
+static void
+nv50_fb_dtor(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nv50_fb_priv *priv = (void *)object;
+
+       if (priv->r100c08_page) {
+               pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
+                              PCI_DMA_BIDIRECTIONAL);
+               __free_page(priv->r100c08_page);
+       }
+
+       nouveau_fb_destroy(&priv->base);
+}
+
+static int
+nv50_fb_init(struct nouveau_object *object)
+{
+       struct nouveau_device *device = nv_device(object);
+       struct nv50_fb_priv *priv = (void *)object;
+       int ret;
+
+       ret = nouveau_fb_init(&priv->base);
+       if (ret)
+               return ret;
+
+       /* Not a clue what this is exactly.  Without pointing it at a
+        * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+        * cause IOMMU "read from address 0" errors (rh#561267)
+        */
+       nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
+
+       /* This is needed to get meaningful information from 100c90
+        * on traps. No idea what these values mean exactly. */
+       switch (device->chipset) {
+       case 0x50:
+               nv_wr32(priv, 0x100c90, 0x000707ff);
+               break;
+       case 0xa3:
+       case 0xa5:
+       case 0xa8:
+               nv_wr32(priv, 0x100c90, 0x000d0fff);
+               break;
+       case 0xaf:
+               nv_wr32(priv, 0x100c90, 0x089d1fff);
+               break;
+       default:
+               nv_wr32(priv, 0x100c90, 0x001d07ff);
+               break;
+       }
+
+       return 0;
+}
+
+struct nouveau_oclass
+nv50_fb_oclass = {
+       .handle = NV_SUBDEV(FB, 0x50),
+       .ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = nv50_fb_ctor,
+               .dtor = nv50_fb_dtor,
+               .init = nv50_fb_init,
+               .fini = _nouveau_fb_fini,
+       },
+};
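
Two things in the new nv50 code deserve a note. First, the ram.size decode: 0x10020c apparently packs a 40-bit byte count, with its low byte holding bits 32-39, hence the shuffle into a u64. Second, the row-block computation; the sketch below runs nv50_fb_vram_rblock()'s arithmetic over an invented register set (2 partitions, 9 column bits, 12 row bits on both ranks, 4 banks, no 3-way reorder):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t r0 = 0x00000000;  /* bit 2 clear: single row size    */
        uint32_t r4 = 0x00449000;  /* colbits=9 rowbits=12/12 banks=4 */
        uint32_t rt = 0x00000000;  /* bit 0 clear: no 3-way reorder   */
        uint32_t ru = 0x00030000;  /* partitions 0 and 1 present      */
        uint64_t rowsize, predicted;
        int i, parts = 0;

        for (i = 0; i < 8; i++)
                if (ru & (0x00010000 << i))
                        parts++;

        int colbits  =  (r4 & 0x0000f000) >> 12;
        int rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
        int rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
        int banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);

        rowsize   = (uint64_t)parts * banks * (1 << colbits) * 8;
        predicted = rowsize << rowbitsa;
        if (r0 & 0x00000004)
                predicted += rowsize << rowbitsb;

        printf("parts=%d rowsize=%llu predicted=%llu MiB rblock=%llu\n",
               parts, (unsigned long long)rowsize,
               (unsigned long long)(predicted >> 20),
               (unsigned long long)(rowsize * ((rt & 1) ? 3 : 1)));
        /* parts=2 rowsize=32768 predicted=128 MiB rblock=32768 */
        return 0;
}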
index 9f59f2b..306bdf1 100644 (file)
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 }
 
 static int
+nvc0_fb_vram_init(struct nouveau_fb *pfb)
+{
+       struct nouveau_bios *bios = nouveau_bios(pfb);
+       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+       u32 parts = nv_rd32(pfb, 0x022438);
+       u32 pmask = nv_rd32(pfb, 0x022554);
+       u32 bsize = nv_rd32(pfb, 0x10f20c);
+       u32 offset, length;
+       bool uniform = true;
+       int ret, part;
+
+       nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
+       nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+       pfb->ram.type = nouveau_fb_bios_memtype(bios);
+       pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
+
+       /* read amount of vram attached to each memory controller */
+       for (part = 0; part < parts; part++) {
+               if (!(pmask & (1 << part))) {
+                       u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
+                       if (psize != bsize) {
+                               if (psize < bsize)
+                                       bsize = psize;
+                               uniform = false;
+                       }
+
+                       nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
+                       pfb->ram.size += (u64)psize << 20;
+               }
+       }
+
+       /* if all controllers have the same amount attached, there are no holes */
+       if (uniform) {
+               offset = rsvd_head;
+               length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
+               return nouveau_mm_init(&pfb->vram, offset, length, 1);
+       }
+
+       /* otherwise, address lowest common amount from 0GiB */
+       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
+       if (ret)
+               return ret;
+
+       /* and the rest starting from (8GiB + common_size) */
+       offset = (0x0200000000ULL >> 12) + (bsize << 8);
+       length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
+
+       ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
+       if (ret) {
+               nouveau_mm_fini(&pfb->vram);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
 nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
                 u32 memtype, struct nouveau_mem **pmem)
 {
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
 }
 
 static int
-nvc0_vram_detect(struct nvc0_fb_priv *priv)
-{
-       struct nouveau_bios *bios = nouveau_bios(priv);
-       struct nouveau_fb *pfb = &priv->base;
-       const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-       const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-       u32 parts = nv_rd32(priv, 0x022438);
-       u32 pmask = nv_rd32(priv, 0x022554);
-       u32 bsize = nv_rd32(priv, 0x10f20c);
-       u32 offset, length;
-       bool uniform = true;
-       int ret, part;
-
-       nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
-       nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-       priv->base.ram.type = nouveau_fb_bios_memtype(bios);
-       priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
-
-       /* read amount of vram attached to each memory controller */
-       for (part = 0; part < parts; part++) {
-               if (!(pmask & (1 << part))) {
-                       u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
-                       if (psize != bsize) {
-                               if (psize < bsize)
-                                       bsize = psize;
-                               uniform = false;
-                       }
-
-                       nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
-                       priv->base.ram.size += (u64)psize << 20;
-               }
-       }
-
-       /* if all controllers have the same amount attached, there's no holes */
-       if (uniform) {
-               offset = rsvd_head;
-               length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
-               return nouveau_mm_init(&pfb->vram, offset, length, 1);
-       }
-
-       /* otherwise, address lowest common amount from 0GiB */
-       ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
-       if (ret)
-               return ret;
-
-       /* and the rest starting from (8GiB + common_size) */
-       offset = (0x0200000000ULL >> 12) + (bsize << 8);
-       length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
-
-       ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
-       if (ret) {
-               nouveau_mm_fini(&pfb->vram);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int
 nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
             struct nouveau_oclass *oclass, void *data, u32 size,
             struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.memtype_valid = nvc0_fb_memtype_valid;
+       priv->base.ram.init = nvc0_fb_vram_init;
        priv->base.ram.get = nvc0_fb_vram_new;
        priv->base.ram.put = nv50_fb_vram_del;
 
-       ret = nvc0_vram_detect(priv);
-       if (ret)
-               return ret;
-
        priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!priv->r100c10_page)
                return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        if (pci_dma_mapping_error(device->pdev, priv->r100c10))
                return -EFAULT;
 
-       return nouveau_fb_created(&priv->base);
+       return nouveau_fb_preinit(&priv->base);
 }
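
nvc0_fb_vram_init() handles boards whose memory controllers carry unequal amounts of VRAM: the amount common to all controllers is mapped from the bottom of the address space, and whatever exceeds it is addressed from 8 GiB upward. Units are the subtle part: 0x11020c reports MiB per partition while nouveau_mm works in 4 KiB pages, so '<< 8' converts MiB to pages (256 pages per MiB) and 0x0200000000 >> 12 is the 8 GiB split point in pages. A standalone sketch mirroring the patch's arithmetic, with invented sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t rsvd_head = ( 256 * 1024) >> 12;  /* vga memory */
        const uint64_t rsvd_tail = (1024 * 1024) >> 12;  /* vbios etc  */
        uint32_t psize[3] = { 1024, 1024, 512 };  /* MiB per partition  */
        uint32_t bsize = 1024;                    /* broadcast 0x10f20c */
        uint64_t size = 0;                        /* total bytes        */
        int part, parts = 3, uniform = 1;

        for (part = 0; part < parts; part++) {
                if (psize[part] != bsize) {
                        if (psize[part] < bsize)
                                bsize = psize[part];
                        uniform = 0;
                }
                size += (uint64_t)psize[part] << 20;
        }

        if (uniform) {
                printf("one window: [%llu, +%llu) pages\n",
                       (unsigned long long)rsvd_head,
                       (unsigned long long)((size >> 12) - rsvd_head - rsvd_tail));
        } else {
                uint64_t off = (0x0200000000ULL >> 12) + ((uint64_t)bsize << 8);
                printf("low window:  [%llu, +%llu) pages\n",
                       (unsigned long long)rsvd_head,
                       (unsigned long long)((uint64_t)bsize * parts << 8));
                printf("high window: [%llu, +%llu) pages\n",
                       (unsigned long long)off,
                       (unsigned long long)((size >> 12) - ((uint64_t)bsize << 8) - rsvd_tail));
        }
        return 0;
}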
 
 
index fe1ebf1..dc27e79 100644 (file)
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
                ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
                udelay(1);
                if (!timeout--) {
-                       AUX_ERR("begin idle timeout 0x%08x", ctrl);
+                       AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
                        return -EBUSY;
                }
        } while (ctrl & 0x03010000);
index ba4d28b..f5bbd38 100644 (file)
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
+nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nv04_instobj_priv *node = (void *)object;
        return nv_ro32(object->engine, node->mem->offset + addr);
 }
 
 static void
-nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nv04_instobj_priv *node = (void *)object;
        nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
        return nv_rd32(object, 0x700000 + addr);
 }
 
 static void
-nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        return nv_wr32(object, 0x700000 + addr, data);
 }
index 73c52eb..da64253 100644 (file)
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 }
 
 static u32
-nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
+nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
 {
        struct nv04_instmem_priv *priv = (void *)object;
        return ioread32_native(priv->iomem + addr);
 }
 
 static void
-nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
        struct nv04_instmem_priv *priv = (void *)object;
        iowrite32_native(data, priv->iomem + addr);
index 27ef089..cfc7e31 100644 (file)
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
+nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
 {
        struct nv50_instmem_priv *priv = (void *)object->engine;
        struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
 }
 
 static void
-nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
+nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
 {
        struct nv50_instmem_priv *priv = (void *)object->engine;
        struct nv50_instobj_priv *node = (void *)object;
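
The instmem hunks here (nv04, nv40, nv50) are one mechanical change: the rd32/wr32 address argument widens from u32 to u64 while the bodies stay untouched. Presumably this aligns the accessors with a wider common interface in nouveau_ofuncs (the struct itself is not part of this diff), along the lines of the assumed signatures below, and matters most for nv50, where node->mem->offset + addr indexes an instance space that can sit beyond 4 GiB:

        u32  (*rd32)(struct nouveau_object *, u64 addr);
        void (*wr32)(struct nouveau_object *, u64 addr, u32 data);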
index de5721c..8379aaf 100644 (file)
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
        struct nouveau_mc *pmc = nouveau_mc(subdev);
        const struct nouveau_mc_intr *map = pmc->intr_map;
        struct nouveau_subdev *unit;
-       u32 stat;
+       u32 stat, intr;
 
-       stat = nv_rd32(pmc, 0x000100);
+       intr = stat = nv_rd32(pmc, 0x000100);
        while (stat && map->stat) {
                if (stat & map->stat) {
                        unit = nouveau_subdev(subdev, map->unit);
                        if (unit && unit->intr)
                                unit->intr(unit);
-                       stat &= ~map->stat;
+                       intr &= ~map->stat;
                }
                map++;
        }
 
-       if (stat) {
+       if (intr) {
                nv_error(pmc, "unknown intr 0x%08x\n", stat);
        }
 }
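
The point of the stat/intr split above: 'stat' keeps the full PMC_INTR readout so the table walk tests every bit, while 'intr' gets each claimed bit cleared, so whatever survives the walk is genuinely unclaimed. Note the error path still prints the full 'stat' rather than the leftover 'intr'. A standalone sketch of the same flow:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        struct { uint32_t stat; } map[] = {
                { 0x00000100 }, { 0x00001000 }, { 0 }   /* invented table */
        };
        uint32_t stat, intr;
        int i = 0;

        intr = stat = 0x00041100;               /* invented readout */
        while (stat && map[i].stat) {
                if (stat & map[i].stat)
                        intr &= ~map[i].stat;   /* claimed by a subdev */
                i++;
        }
        if (intr)
                printf("unknown intr 0x%08x\n", (unsigned)intr); /* 0x00040000 */
        return 0;
}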
index cedf33b..8d759f8 100644 (file)
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
        { 0x00200000, NVDEV_SUBDEV_GPIO },
        { 0x04000000, NVDEV_ENGINE_DISP },
        { 0x80000000, NVDEV_ENGINE_SW },
+       { 0x0000d101, NVDEV_SUBDEV_FB },
        {},
 };
 
index a001e4c..ceb5c83 100644 (file)
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
        { 0x00400000, NVDEV_ENGINE_COPY0 },     /* NVA3-     */
        { 0x04000000, NVDEV_ENGINE_DISP },
        { 0x80000000, NVDEV_ENGINE_SW },
+       { 0x0040d101, NVDEV_SUBDEV_FB },
        {},
 };
 
index c2b81e3..9279668 100644 (file)
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
        { 0x00000100, NVDEV_ENGINE_FIFO },
        { 0x00001000, NVDEV_ENGINE_GR },
        { 0x00008000, NVDEV_ENGINE_BSP },
+       { 0x00020000, NVDEV_ENGINE_VP },
        { 0x00100000, NVDEV_SUBDEV_TIMER },
        { 0x00200000, NVDEV_SUBDEV_GPIO },
        { 0x02000000, NVDEV_SUBDEV_LTCG },
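
These intr-map additions pair with the FB rework earlier in the diff: now that nv50_fb_trap() has become a registered subdev handler (nv50_fb_intr), the PMC tables must route the relevant status bits to NVDEV_SUBDEV_FB, 0x0000d101 on nv50 and 0x0040d101 on nv98 (what the individual bits mean is not documented here). The 0x00020000 entry on nvc0 is separate housekeeping, routing that bit to the VP engine.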
index cbf1fc6..4124192 100644 (file)
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
                return nouveau_abi16_put(abi16, -ENODEV);
 
        client = nv_client(abi16->client);
-
-       if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
-               return nouveau_abi16_put(abi16, -EINVAL);
-
        device = nv_device(abi16->device);
        imem   = nouveau_instmem(device);
        pfb    = nouveau_fb(device);
 
+       /* hack to allow channel engine type specification on kepler */
+       if (device->card_type >= NV_E0) {
+               if (init->fb_ctxdma_handle != ~0)
+                       init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+               else
+                       init->fb_ctxdma_handle = init->tt_ctxdma_handle;
+
+               /* allow flips to be executed if this is a graphics channel */
+               init->tt_ctxdma_handle = 0;
+               if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+                       init->tt_ctxdma_handle = 1;
+       }
+
+       if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+               return nouveau_abi16_put(abi16, -EINVAL);
+
        /* allocate "abi16 channel" data and make up a handle for it */
        init->channel = ffsll(~abi16->handles);
        if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
        abi16->handles |= (1 << init->channel);
 
        /* create channel object and initialise dma and fence management */
-       if (device->card_type >= NV_E0) {
-               init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
-               init->tt_ctxdma_handle = 0;
-       }
-
        ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
                                  init->channel, init->fb_ctxdma_handle,
                                  init->tt_ctxdma_handle, &chan->chan);
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
        struct nouveau_abi16_chan *chan, *temp;
        struct nouveau_abi16_ntfy *ntfy;
        struct nouveau_object *object;
-       struct nv_dma_class args;
+       struct nv_dma_class args = {};
        int ret;
 
        if (unlikely(!abi16))
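
The relocated Kepler hack changes meaning, not just position: on NV_E0 a conventional request (fb_ctxdma_handle != ~0) now yields a GR channel, while fb_ctxdma_handle == ~0 selects the engine passed in tt_ctxdma_handle; tt_ctxdma_handle is then repurposed as a flips-allowed flag, set only for graphics channels. The rewrite therefore has to run before the ~0 sanity check, which previously rejected such requests outright. From userspace this is assumed to look like the fragment below (struct and field names from the nouveau UAPI header, the engine constant from nouveau's class headers; illustrative, not a tested invocation):

        struct drm_nouveau_channel_alloc req = {
                .fb_ctxdma_handle = ~0,  /* engine selected by next field */
                .tt_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR,
        };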
index 48783e1..d97f200 100644 (file)
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
        acpi_handle rom_handle;
 } nouveau_dsm_priv;
 
+bool nouveau_is_optimus(void)
+{
+       return nouveau_dsm_priv.optimus_detected;
+}
+
+bool nouveau_is_v1_dsm(void)
+{
+       return nouveau_dsm_priv.dsm_detected;
+}
+
 #define NOUVEAU_DSM_HAS_MUX 0x1
 #define NOUVEAU_DSM_HAS_OPT 0x2
 
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
 
 static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
 {
-       /* perhaps the _DSM functions are mutually exclusive, but prepare for
-        * the future */
-       if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+       if (!nouveau_dsm_priv.dsm_detected)
                return 0;
        if (id == VGA_SWITCHEROO_IGD)
                return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
 
        /* Optimus laptops have the card already disabled in
         * nouveau_switcheroo_set_state */
-       if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+       if (!nouveau_dsm_priv.dsm_detected)
                return 0;
 
        return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
                        has_optimus = 1;
        }
 
-       if (vga_count == 2 && has_dsm && guid_valid) {
+       /* find the optimus DSM or the old v1 DSM */
+       if (has_optimus == 1) {
                acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
                        &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+               printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
                        acpi_method_name);
-               nouveau_dsm_priv.dsm_detected = true;
+               nouveau_dsm_priv.optimus_detected = true;
                ret = true;
-       }
-
-       if (has_optimus == 1) {
+       } else if (vga_count == 2 && has_dsm && guid_valid) {
                acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
                        &buffer);
-               printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+               printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
                        acpi_method_name);
-               nouveau_dsm_priv.optimus_detected = true;
+               nouveau_dsm_priv.dsm_detected = true;
                ret = true;
        }
 
        return ret;
 }
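
The reorder in nouveau_dsm_detect() gives the Optimus DSM priority: on machines exposing both interfaces only optimus_detected is set, making the two flags mutually exclusive. That is what lets nouveau_dsm_switchto() and nouveau_dsm_power_state() above drop their combined checks and key on dsm_detected alone, and the new nouveau_is_optimus()/nouveau_is_v1_dsm() helpers export the distinction to the rest of the driver.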
 
index 08af677..d0da230 100644 (file)
@@ -4,6 +4,8 @@
 #define ROM_BIOS_PAGE 4096
 
 #if defined(CONFIG_ACPI)
+bool nouveau_is_optimus(void);
+bool nouveau_is_v1_dsm(void);
 void nouveau_register_dsm_handler(void);
 void nouveau_unregister_dsm_handler(void);
 void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
 bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
 void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
 #else
+static inline bool nouveau_is_optimus(void) { return false; }
+static inline bool nouveau_is_v1_dsm(void) { return false; }
 static inline void nouveau_register_dsm_handler(void) {}
 static inline void nouveau_unregister_dsm_handler(void) {}
 static inline void nouveau_switcheroo_optimus_dsm(void) {}
index 09fdef2..865eddf 100644 (file)
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
        return 0;
 }
 
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-bool
-bios_encoder_match(struct dcb_output *dcb, u32 hash)
-{
-       if ((hash & 0x000000f0) != (dcb->location << 4))
-               return false;
-       if ((hash & 0x0000000f) != dcb->type)
-               return false;
-       if (!(hash & (dcb->or << 16)))
-               return false;
-
-       switch (dcb->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_LVDS:
-       case DCB_OUTPUT_DP:
-               if (hash & 0x00c00000) {
-                       if (!(hash & (dcb->sorconf.link << 22)))
-                               return false;
-               }
-       default:
-               return true;
-       }
-}
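
For reference before it disappears, the encoder hash that bios_encoder_match() tested packs the DCB type in bits 0-3, the location in bits 4-7, an OR mask from bit 16 up and, for SOR outputs, a link mask at bit 22. The standalone sketch below restates the match (collapsed to the TMDS case for brevity; DCB_OUTPUT_TMDS is 2, as in the driver's dcb.h):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct dcb_output { int type, location, or; struct { int link; } sorconf; };

static bool
match(struct dcb_output *dcb, uint32_t hash)
{
        if ((hash & 0x000000f0) != (uint32_t)(dcb->location << 4))
                return false;
        if ((hash & 0x0000000f) != (uint32_t)dcb->type)
                return false;
        if (!(hash & (dcb->or << 16)))
                return false;
        if (dcb->type == 2 /* TMDS */ && (hash & 0x00c00000) &&
            !(hash & (dcb->sorconf.link << 22)))
                return false;
        return true;
}

int main(void)
{
        struct dcb_output dcb = { .type = 2, .location = 0, .or = 1,
                                  .sorconf = { .link = 1 } };

        printf("%d\n", match(&dcb, 0x00410002));  /* 1: TMDS, OR0, link 0 */
        printf("%d\n", match(&dcb, 0x00420002));  /* 0: hash wants OR1    */
        return 0;
}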
-
-int
-nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
-                              struct dcb_output *dcbent, int crtc)
-{
-       /*
-        * The display script table is located by the BIT 'U' table.
-        *
-        * It contains an array of pointers to various tables describing
-        * a particular output type.  The first 32-bits of the output
-        * tables contains similar information to a DCB entry, and is
-        * used to decide whether that particular table is suitable for
-        * the output you want to access.
-        *
-        * The "record header length" field here seems to indicate the
-        * offset of the first configuration entry in the output tables.
-        * This is 10 on most cards I've seen, but 12 has been witnessed
-        * on DP cards, and there's another script pointer within the
-        * header.
-        *
-        * offset + 0   ( 8 bits): version
-        * offset + 1   ( 8 bits): header length
-        * offset + 2   ( 8 bits): record length
-        * offset + 3   ( 8 bits): number of records
-        * offset + 4   ( 8 bits): record header length
-        * offset + 5   (16 bits): pointer to first output script table
-        */
-
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvbios *bios = &drm->vbios;
-       uint8_t *table = &bios->data[bios->display.script_table_ptr];
-       uint8_t *otable = NULL;
-       uint16_t script;
-       int i;
-
-       if (!bios->display.script_table_ptr) {
-               NV_ERROR(drm, "No pointer to output script table\n");
-               return 1;
-       }
-
-       /*
-        * Nothing useful has been in any of the pre-2.0 tables I've seen,
-        * so until they are, we really don't need to care.
-        */
-       if (table[0] < 0x20)
-               return 1;
-
-       if (table[0] != 0x20 && table[0] != 0x21) {
-               NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
-                        table[0]);
-               return 1;
-       }
-
-       /*
-        * The output script tables describing a particular output type
-        * look as follows:
-        *
-        * offset + 0   (32 bits): output this table matches (hash of DCB)
-        * offset + 4   ( 8 bits): unknown
-        * offset + 5   ( 8 bits): number of configurations
-        * offset + 6   (16 bits): pointer to some script
-        * offset + 8   (16 bits): pointer to some script
-        *
-        * headerlen == 10
-        * offset + 10           : configuration 0
-        *
-        * headerlen == 12
-        * offset + 10           : pointer to some script
-        * offset + 12           : configuration 0
-        *
-        * Each config entry is as follows:
-        *
-        * offset + 0   (16 bits): unknown, assumed to be a match value
-        * offset + 2   (16 bits): pointer to script table (clock set?)
-        * offset + 4   (16 bits): pointer to script table (reset?)
-        *
-        * There doesn't appear to be a count value to say how many
-        * entries exist in each script table, instead, a 0 value in
-        * the first 16-bit word seems to indicate both the end of the
-        * list and the default entry.  The second 16-bit word in the
-        * script tables is a pointer to the script to execute.
-        */
-
-       NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
-                       dcbent->type, dcbent->location, dcbent->or);
-       for (i = 0; i < table[3]; i++) {
-               otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
-               if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
-                       break;
-       }
-
-       if (!otable) {
-               NV_DEBUG(drm, "failed to match any output table\n");
-               return 1;
-       }
-
-       if (pclk < -2 || pclk > 0) {
-               /* Try to find matching script table entry */
-               for (i = 0; i < otable[5]; i++) {
-                       if (ROM16(otable[table[4] + i*6]) == type)
-                               break;
-               }
-
-               if (i == otable[5]) {
-                       NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
-                                     "using first\n",
-                                type, dcbent->type, dcbent->or);
-                       i = 0;
-               }
-       }
-
-       if (pclk == 0) {
-               script = ROM16(otable[6]);
-               if (!script) {
-                       NV_DEBUG(drm, "output script 0 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk == -1) {
-               script = ROM16(otable[8]);
-               if (!script) {
-                       NV_DEBUG(drm, "output script 1 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk == -2) {
-               if (table[4] >= 12)
-                       script = ROM16(otable[10]);
-               else
-                       script = 0;
-               if (!script) {
-                       NV_DEBUG(drm, "output script 2 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk > 0) {
-               script = ROM16(otable[table[4] + i*6 + 2]);
-               if (script)
-                       script = clkcmptable(bios, script, pclk);
-               if (!script) {
-                       NV_DEBUG(drm, "clock script 0 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       } else
-       if (pclk < 0) {
-               script = ROM16(otable[table[4] + i*6 + 4]);
-               if (script)
-                       script = clkcmptable(bios, script, -pclk);
-               if (!script) {
-                       NV_DEBUG(drm, "clock script 1 not found\n");
-                       return 1;
-               }
-
-               NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
-               nouveau_bios_run_init_table(dev, script, dcbent, crtc);
-       }
-
-       return 0;
-}
-
-
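
The removed nouveau_bios_run_display_table() walked the layout the comments above describe by hand. As a rough, self-contained sketch of that header walk (rom16() stands in for the driver's ROM16()/ROMPTR(); the fake image in main() is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Little-endian 16-bit read, standing in for the driver's ROM16(). */
static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Walk the version 0x20/0x21 header documented above and print the
 * per-record pointers to the output script tables. */
static void dump_output_script_table(const uint8_t *bios, uint16_t offset)
{
	const uint8_t *t = bios + offset;
	uint8_t version = t[0];	/* 0x20 or 0x21 handled by the old code */
	uint8_t hdrlen  = t[1];	/* 10 on most cards, 12 seen on DP cards */
	uint8_t reclen  = t[2];
	uint8_t records = t[3];
	int i;

	if (version != 0x20 && version != 0x21) {
		fprintf(stderr, "unknown version 0x%02x\n", version);
		return;
	}

	for (i = 0; i < records; i++)
		printf("record %d -> otable at 0x%04x\n", i,
		       rom16(t + hdrlen + i * reclen));
}

int main(void)
{
	/* Tiny fake image: one record pointing at offset 0x000c. */
	uint8_t bios[16] = { 0x20, 10, 2, 1, 10, 0, 0, 0, 0, 0, 0x0c, 0x00 };

	dump_output_script_table(bios, 0);
	return 0;
}
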
 int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
 {
        /*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
        return 0;
 }
 
-static int
-parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
-                     struct bit_entry *bitentry)
-{
-       /*
-        * Parses the pointer to the G80 output script tables
-        *
-        * Starting at bitentry->offset:
-        *
-        * offset + 0  (16 bits): output script table pointer
-        */
-
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       uint16_t outputscripttableptr;
-
-       if (bitentry->length != 3) {
-               NV_ERROR(drm, "Do not understand BIT U table\n");
-               return -EINVAL;
-       }
-
-       outputscripttableptr = ROM16(bios->data[bitentry->offset]);
-       bios->display.script_table_ptr = outputscripttableptr;
-       return 0;
-}
-
 struct bit_table {
        const char id;
        int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
        parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
        parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
        parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
-       parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
 
        return 0;
 }
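
A minimal model of the bit_table dispatch used here, showing how dropping the 'U' entry simply removes one id from the list (the parser callback and entry length are illustrative, not the driver's):

#include <stdio.h>

/* Each BIT entry is identified by a single character and routed to a
 * parser; after this patch the 'U' id is simply absent from the list. */
struct bit_table {
	char id;
	int (*parse_fn)(int length);
};

static int parse_tmds(int length)
{
	printf("parsing BIT 'T' entry, length %d\n", length);
	return 0;
}

static const struct bit_table tables[] = {
	{ 'T', parse_tmds },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
		if (tables[i].id == 'T')
			return tables[i].parse_fn(4);
	return 1;	/* no parser registered for this id */
}
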
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvbios *bios = &drm->vbios;
-       int i, ret = 0;
+       int ret = 0;
 
        /* Reset the BIOS head to 0. */
        bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
                bios->fp.lvds_init_run = false;
        }
 
-       if (nv_device(drm->device)->card_type >= NV_50) {
-               for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
-                       nouveau_bios_run_display_table(dev, 0, 0,
-                                                      &bios->dcb.entry[i], -1);
-               }
-       }
-
        return ret;
 }
 
index 3befbb8..f68c54c 100644 (file)
@@ -128,12 +128,6 @@ struct nvbios {
        } state;
 
        struct {
-               struct dcb_output *output;
-               int crtc;
-               uint16_t script_table_ptr;
-       } display;
-
-       struct {
                uint16_t fptablepointer;        /* also used by tmds */
                uint16_t fpxlatetableptr;
                int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
 int nouveau_run_vbios_init(struct drm_device *);
 struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
-int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
-                                         struct dcb_output *, int crtc);
 bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
 uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
 int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
                          int head, int pxclk);
 int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
                            enum LVDS_script, int pxclk);
-bool bios_encoder_match(struct dcb_output *, u32 hash);
 
 #endif
index 35ac57f..5614c89 100644 (file)
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 
        ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
                          type, &nvbo->placement,
-                         align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+                         align >> PAGE_SHIFT, false, NULL, acc_size, sg,
                          nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
        nouveau_bo_placement_set(nvbo, memtype, 0);
 
-       ret = nouveau_bo_validate(nvbo, false, false, false);
+       ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-       ret = nouveau_bo_validate(nvbo, false, false, false);
+       ret = nouveau_bo_validate(nvbo, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
 
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
-                   bool no_wait_reserve, bool no_wait_gpu)
+                   bool no_wait_gpu)
 {
        int ret;
 
-       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
-                             no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+                             interruptible, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
-                             bool no_wait_reserve, bool no_wait_gpu,
-                             struct ttm_mem_reg *new_mem)
+                             bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_fence *fence = NULL;
        int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
-                                       no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
+                                       no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
 }
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                    bool no_wait_reserve, bool no_wait_gpu,
-                    struct ttm_mem_reg *new_mem)
+                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
       struct nouveau_channel *chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
        ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-                                                   no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }
 
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_reserve, bool no_wait_gpu,
-                     struct ttm_mem_reg *new_mem)
+                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;
 
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
@@ -1098,8 +1094,7 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                     bool no_wait_reserve, bool no_wait_gpu,
-                     struct ttm_mem_reg *new_mem)
+                     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
-       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
        if (ret)
                return ret;
 
-       ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;
 
-       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
        if (ret)
                goto out;
 
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-               bool no_wait_reserve, bool no_wait_gpu,
-               struct ttm_mem_reg *new_mem)
+               bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
        /* CPU copy if we have no accelerated method available */
        if (!drm->ttm.move) {
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
                goto out;
        }
 
        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_flipd(bo, evict, intr,
+                                           no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
-               ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_flips(bo, evict, intr,
+                                           no_wait_gpu, new_mem);
        else
-               ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+               ret = nouveau_bo_move_m2mf(bo, evict, intr,
+                                          no_wait_gpu, new_mem);
 
        if (!ret)
                goto out;
 
        /* Fallback to software copy. */
-       ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+       ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 
 out:
        if (nv_device(drm->device)->card_type < NV_50) {
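
The dispatch logic in nouveau_bo_move() above is easier to see outside the driver; a minimal userspace model of the same path selection (the names are stand-ins for the nouveau_bo_move_* helpers, not the driver API):

#include <stdbool.h>
#include <stdio.h>

enum mem_type { MEM_SYSTEM, MEM_VRAM, MEM_TT };

/* Pick flipd/flips/m2mf by memory placement, falling back to a CPU
 * copy when no accelerated method is available. */
static const char *pick_move(bool have_accel, enum mem_type old_mem,
			     enum mem_type new_mem)
{
	if (!have_accel)
		return "memcpy";	/* no accelerated method available */
	if (new_mem == MEM_SYSTEM)
		return "flipd";		/* bounce through TT on the way out */
	if (old_mem == MEM_SYSTEM)
		return "flips";		/* bounce through TT on the way in */
	return "m2mf";			/* direct engine copy */
}

int main(void)
{
	printf("%s\n", pick_move(true, MEM_VRAM, MEM_SYSTEM));	/* flipd */
	printf("%s\n", pick_move(true, MEM_SYSTEM, MEM_VRAM));	/* flips */
	printf("%s\n", pick_move(false, MEM_VRAM, MEM_TT));	/* memcpy */
	return 0;
}
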
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable;
        nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
-       return nouveau_bo_validate(nvbo, false, true, false);
+       return nouveau_bo_validate(nvbo, false, false);
 }
 
 static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
 }
 
 static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
 {
        return nouveau_fence_done(sync_obj);
 }
 
 static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
 {
        return nouveau_fence_wait(sync_obj, lazy, intr);
 }
 
 static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
 {
        return 0;
 }
index dec51b1..25ca379 100644 (file)
@@ -76,7 +76,7 @@ u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
-                        bool no_wait_reserve, bool no_wait_gpu);
+                        bool no_wait_gpu);
 
 struct nouveau_vma *
 nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
index c1d7301..174300b 100644 (file)
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
                nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
                nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
                nouveau_bo_unmap(chan->push.buffer);
+               if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+                       nouveau_bo_unpin(chan->push.buffer);
                nouveau_bo_ref(NULL, &chan->push.buffer);
                kfree(chan);
        }
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        struct nouveau_fb *pfb = nouveau_fb(device);
        struct nouveau_software_chan *swch;
        struct nouveau_object *object;
-       struct nv_dma_class args;
+       struct nv_dma_class args = {};
        int ret, i;
 
        /* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
        /* allocate software object class (used for fences on <= nv05, and
         * to signal flip completion), bind it to a subchannel.
         */
-       if (chan != chan->drm->cechan) {
+       if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
                ret = nouveau_object_new(nv_object(client), chan->handle,
                                         NvSw, nouveau_abi16_swclass(chan->drm),
                                         NULL, 0, &object);
index d3595b2..ac340ba 100644 (file)
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
        dev  = nv_connector->base.dev;
        drm  = nouveau_drm(dev);
        gpio = nouveau_gpio(drm->device);
-       NV_DEBUG(drm, "\n");
 
        if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
                gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
        }
 
        if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
-               drm_connector_property_set_value(connector,
+               drm_object_property_set_value(&connector->base,
                        dev->mode_config.dvi_i_subconnector_property,
                        nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
                        DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
        int type, ret = 0;
        bool dummy;
 
-       NV_DEBUG(drm, "\n");
-
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                nv_connector = nouveau_connector(connector);
                if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
 
        /* Init DVI-I specific properties */
        if (nv_connector->type == DCB_CONNECTOR_DVI_I)
-               drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+               drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
 
        /* Add overscan compensation options to digital outputs */
        if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
             type == DRM_MODE_CONNECTOR_DVII ||
             type == DRM_MODE_CONNECTOR_HDMIA ||
             type == DRM_MODE_CONNECTOR_DisplayPort)) {
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_property,
                                              UNDERSCAN_OFF);
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_hborder_property,
                                              0);
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->underscan_vborder_property,
                                              0);
        }
 
        /* Add hue and saturation options */
        if (disp->vibrant_hue_property)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->vibrant_hue_property,
                                              90);
        if (disp->color_vibrance_property)
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                              disp->color_vibrance_property,
                                              150);
 
        switch (nv_connector->type) {
        case DCB_CONNECTOR_VGA:
                if (nv_device(drm->device)->card_type >= NV_50) {
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                        dev->mode_config.scaling_mode_property,
                                        nv_connector->scaling_mode);
                }
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
        default:
                nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
 
-               drm_connector_attach_property(connector,
+               drm_object_attach_property(&connector->base,
                                dev->mode_config.scaling_mode_property,
                                nv_connector->scaling_mode);
                if (disp->dithering_mode) {
                        nv_connector->dithering_mode = DITHERING_MODE_AUTO;
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                                disp->dithering_mode,
                                                nv_connector->dithering_mode);
                }
                if (disp->dithering_depth) {
                        nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
-                       drm_connector_attach_property(connector,
+                       drm_object_attach_property(&connector->base,
                                                disp->dithering_depth,
                                                nv_connector->dithering_depth);
                }
index ebdb876..20eb84c 100644 (file)
@@ -28,6 +28,7 @@
 #define __NOUVEAU_CONNECTOR_H__
 
 #include <drm/drm_edid.h>
+#include "nouveau_crtc.h"
 
 struct nouveau_i2c_port;
 
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
        return container_of(con, struct nouveau_connector, base);
 }
 
+static inline struct nouveau_connector *
+nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+{
+       struct drm_device *dev = nv_crtc->base.dev;
+       struct drm_connector *connector;
+       struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder && connector->encoder->crtc == crtc)
+                       return nouveau_connector(connector);
+       }
+
+       return NULL;
+}
+
 struct drm_connector *
 nouveau_connector_create(struct drm_device *, int index);
 
index e6d0d1e..d1e5890 100644 (file)
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
        return &crtc->base;
 }
 
-int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
-                        uint32_t buffer_handle, uint32_t width,
-                        uint32_t height);
-int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
-
 int nv04_cursor_init(struct nouveau_crtc *);
-int nv50_cursor_init(struct nouveau_crtc *);
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
 
 #endif /* __NOUVEAU_CRTC_H__ */
index 86124b1..e4188f2 100644 (file)
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
                        nv_fb->r_dma = NvEvoVRAM_LP;
 
                switch (fb->depth) {
-               case  8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
-               case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
-               case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+               case  8: nv_fb->r_format = 0x1e00; break;
+               case 15: nv_fb->r_format = 0xe900; break;
+               case 16: nv_fb->r_format = 0xe800; break;
                case 24:
-               case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
-               case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+               case 32: nv_fb->r_format = 0xcf00; break;
+               case 30: nv_fb->r_format = 0xd100; break;
                default:
                         NV_ERROR(drm, "unknown depth %d\n", fb->depth);
                         return -EINVAL;
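
For reference, the depth-to-format mapping above, with the raw values this patch substitutes for the old NV50_EVO_CRTC_FB_DEPTH_* names, collected into one runnable sketch (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Map framebuffer depth to the EVO surface-format code; the hex values
 * are exactly those the patch now writes in place of the old names. */
static int evo_format_for_depth(int depth, uint32_t *fmt)
{
	switch (depth) {
	case  8: *fmt = 0x1e00; return 0;
	case 15: *fmt = 0xe900; return 0;
	case 16: *fmt = 0xe800; return 0;
	case 24:
	case 32: *fmt = 0xcf00; return 0;
	case 30: *fmt = 0xd100; return 0;
	default: return -1;	/* unknown depth */
	}
}

int main(void)
{
	uint32_t fmt;

	if (evo_format_for_depth(24, &fmt) == 0)
		printf("depth 24 -> 0x%04x\n", fmt);
	return 0;
}
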
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
        disp->underscan_vborder_property =
                drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
 
-       if (gen == 1) {
+       if (gen >= 1) {
                disp->vibrant_hue_property =
                        drm_property_create(dev, DRM_MODE_PROP_RANGE,
                                            "vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
                if (nv_device(drm->device)->card_type < NV_50)
                        ret = nv04_display_create(dev);
                else
-               if (nv_device(drm->device)->card_type < NV_D0)
                        ret = nv50_display_create(dev);
-               else
-                       ret = nvd0_display_create(dev);
                if (ret)
                        goto disp_create_err;
 
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
        nouveau_backlight_exit(dev);
        drm_vblank_cleanup(dev);
 
+       drm_kms_helper_poll_fini(dev);
+       drm_mode_config_cleanup(dev);
+
        if (disp->dtor)
                disp->dtor(dev);
 
-       drm_kms_helper_poll_fini(dev);
-       drm_mode_config_cleanup(dev);
        nouveau_drm(dev)->display = NULL;
        kfree(disp);
 }
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Emit a page flip */
        if (nv_device(drm->device)->card_type >= NV_50) {
-               if (nv_device(drm->device)->card_type >= NV_D0)
-                       ret = nvd0_display_flip_next(crtc, fb, chan, 0);
-               else
-                       ret = nv50_display_flip_next(crtc, fb, chan);
+               ret = nv50_display_flip_next(crtc, fb, chan, 0);
                if (ret) {
                        mutex_unlock(&chan->cli->mutex);
                        goto fail_unreserve;
index 978a108..5983865 100644 (file)
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 
+#include <core/class.h>
+
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
-u8 *
-nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct bit_entry d;
-       u8 *table;
-       int i;
-
-       if (bit_table(dev, 'd', &d)) {
-               NV_ERROR(drm, "BIT 'd' table not found\n");
-               return NULL;
-       }
-
-       if (d.version != 1) {
-               NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
-               return NULL;
-       }
-
-       table = ROMPTR(dev, d.data[0]);
-       if (!table) {
-               NV_ERROR(drm, "displayport table pointer invalid\n");
-               return NULL;
-       }
-
-       switch (table[0]) {
-       case 0x20:
-       case 0x21:
-       case 0x30:
-       case 0x40:
-               break;
-       default:
-               NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
-               return NULL;
-       }
-
-       for (i = 0; i < table[3]; i++) {
-               *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
-               if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
-                       return table;
-       }
-
-       NV_ERROR(drm, "displayport encoder table not found\n");
-       return NULL;
-}
-
 /******************************************************************************
  * link training
  *****************************************************************************/
 struct dp_state {
        struct nouveau_i2c_port *auxch;
-       struct dp_train_func *func;
+       struct nouveau_object *core;
        struct dcb_output *dcb;
        int crtc;
        u8 *dpcd;
@@ -97,13 +54,20 @@ static void
 dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        u8 sink[2];
+       u32 data;
 
        NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
 
        /* set desired link configuration on the source */
-       dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
-                          dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+       data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
+       if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+               data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
+
+       nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
 
        /* inform the sink of the new configuration */
        sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
 dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        u8 sink_tp;
 
        NV_DEBUG(drm, "training pattern %d\n", pattern);
 
-       dp->func->train_set(dev, dp->dcb, pattern);
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
 
        nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
        sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
 dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
        int i;
 
        for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
                        dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
                NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
-               dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+
+               nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
        }
 
        return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
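
The nv_call() sites above all address the display core with a method offset packed from the CRTC index, SOR link, and OR number, and encode the link configuration into the LNKCTL payload. A small userspace model of both encodings (example values only; the NV94_DISP_* method numbers themselves are not reproduced here):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint8_t dcb_or = 0x4;		/* example DCB OR mask */
	int sorconf_link = 1;		/* example SOR link config */
	int crtc = 1, link_nr = 4;
	uint32_t link_bw = 270000;	/* 270000 / 27000 = 10, i.e. 2.7 Gb/s */

	uint32_t or   = ffs(dcb_or) - 1;
	uint32_t link = !(sorconf_link & 1);
	uint32_t moff = (crtc << 3) | (link << 2) | or;
	uint32_t data = ((link_bw / 27000) << 8) | link_nr;

	printf("moff = 0x%02x, lnkctl data = 0x%04x\n", moff, data);
	return 0;
}
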
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
 }
 
 static void
-dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
+dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
 {
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30) {
-                       if (enable) script = ROM16(entry[12]);
-                       else        script = ROM16(entry[14]);
-               } else
-               if (table[0] == 0x40) {
-                       if (enable) script = ROM16(entry[11]);
-                       else        script = ROM16(entry[13]);
-               }
-       }
-
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
-}
-
-static void
-dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
-{
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30)
-                       script = ROM16(entry[6]);
-               else
-               if (table[0] == 0x40)
-                       script = ROM16(entry[5]);
-       }
-
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
+
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
+                         NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
+                         NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
+                         NV94_DISP_SOR_DP_TRAIN_OP_INIT);
 }
 
 static void
 dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
 {
-       u16 script = 0x0000;
-       u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
-       if (table) {
-               if (table[0] >= 0x20 && table[0] <= 0x30)
-                       script = ROM16(entry[8]);
-               else
-               if (table[0] == 0x40)
-                       script = ROM16(entry[7]);
-       }
+       struct dcb_output *dcb = dp->dcb;
+       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
+       const u32 moff = (dp->crtc << 3) | (link << 2) | or;
 
-       nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+       nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
+                         NV94_DISP_SOR_DP_TRAIN_OP_FINI);
 }
 
 static bool
 nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
-                     struct dp_train_func *func)
+                     struct nouveau_object *core)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
        if (!dp.auxch)
                return false;
 
-       dp.func = func;
+       dp.core = core;
        dp.dcb = nv_encoder->dcb;
        dp.crtc = nv_crtc->index;
        dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
         */
        gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
 
-       /* enable down-spreading, if possible */
-       dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
-
-       /* execute pre-train script from vbios */
-       dp_link_train_init(dev, &dp);
+       /* enable down-spreading and execute pre-train script from vbios */
+       dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
 
        /* start off at highest link rate supported by encoder and display */
        while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 
 void
 nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
-               struct dp_train_func *func)
+               struct nouveau_object *core)
 {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
        nv_wraux(auxch, DP_SET_POWER, &status, 1);
 
        if (mode == DRM_MODE_DPMS_ON)
-               nouveau_dp_link_train(encoder, datarate, func);
+               nouveau_dp_link_train(encoder, datarate, core);
 }
 
 static void
index 8503b2e..01c403d 100644 (file)
@@ -49,8 +49,6 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_fence.h"
 
-#include "nouveau_ttm.h"
-
 MODULE_PARM_DESC(config, "option string to pass to driver core");
 static char *nouveau_config;
 module_param_named(config, nouveau_config, charp, 0400);
@@ -149,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                        NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 
                arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
-               arg1 = 0;
+               arg1 = 1;
        } else {
                arg0 = NvDmaFB;
                arg1 = NvDmaTT;
@@ -224,6 +222,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
        boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 #endif
        remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+       kfree(aper);
 
        ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
                                    nouveau_config, nouveau_debug, &device);
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
 }
 
 int
-nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+nouveau_do_suspend(struct drm_device *dev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_cli *cli;
        int ret;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
-           pm_state.event == PM_EVENT_PRETHAW)
-               return 0;
-
        if (dev->mode_config.num_crtc) {
                NV_INFO(drm, "suspending fbcon...\n");
                nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
                goto fail_client;
 
        nouveau_agp_fini(drm);
-
-       pci_save_state(pdev);
-       if (pm_state.event == PM_EVENT_SUSPEND) {
-               pci_disable_device(pdev);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
-
        return 0;
 
 fail_client:
@@ -457,24 +444,33 @@ fail_client:
        return ret;
 }
 
-int
-nouveau_drm_resume(struct pci_dev *pdev)
+int nouveau_pmops_suspend(struct device *dev)
 {
-       struct drm_device *dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_cli *cli;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       NV_INFO(drm, "re-enabling device...\n");
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       ret = pci_enable_device(pdev);
+       ret = nouveau_do_suspend(drm_dev);
        if (ret)
                return ret;
-       pci_set_master(pdev);
+
+       pci_save_state(pdev);
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+int
+nouveau_do_resume(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_cli *cli;
+
+       NV_INFO(drm, "re-enabling device...\n");
 
        nouveau_agp_reset(drm);
 
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
        return 0;
 }
 
+int nouveau_pmops_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       int ret;
+
+       if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+               return 0;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       ret = pci_enable_device(pdev);
+       if (ret)
+               return ret;
+       pci_set_master(pdev);
+
+       return nouveau_do_resume(drm_dev);
+}
+
+static int nouveau_pmops_freeze(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return nouveau_do_suspend(drm_dev);
+}
+
+static int nouveau_pmops_thaw(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+       return nouveau_do_resume(drm_dev);
+}
+
+
 static int
 nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 {
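
The reshuffle above splits the old pci_driver suspend/resume pair into shared nouveau_do_suspend()/nouveau_do_resume() plus thin dev_pm_ops wrappers: suspend/resume also change PCI power state, while freeze/thaw (hibernation image creation) only quiesce the device and leave it powered. A compact userspace model of that split (all names are stand-ins):

#include <stdio.h>

static int do_suspend(void) { puts("quiesce engines, evict vram"); return 0; }
static int do_resume(void)  { puts("repost, restart engines");     return 0; }

static int pm_suspend(void)
{
	int ret = do_suspend();

	if (ret)
		return ret;
	puts("pci: save state, disable, D3hot");	/* suspend only */
	return 0;
}

static int pm_freeze(void)
{
	return do_suspend();		/* no PCI power change for freeze */
}

int main(void)
{
	pm_suspend();
	pm_freeze();
	return do_resume();
}
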
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
        {}
 };
 
+static const struct dev_pm_ops nouveau_pm_ops = {
+       .suspend = nouveau_pmops_suspend,
+       .resume = nouveau_pmops_resume,
+       .freeze = nouveau_pmops_freeze,
+       .thaw = nouveau_pmops_thaw,
+       .poweroff = nouveau_pmops_freeze,
+       .restore = nouveau_pmops_resume,
+};
+
 static struct pci_driver
 nouveau_drm_pci_driver = {
        .name = "nouveau",
        .id_table = nouveau_drm_pci_table,
        .probe = nouveau_drm_probe,
        .remove = nouveau_drm_remove,
-       .suspend = nouveau_drm_suspend,
-       .resume = nouveau_drm_resume,
+       .driver.pm = &nouveau_pm_ops,
 };
 
 static int __init
index a101699..aa89eb9 100644 (file)
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
        return nv_device(nouveau_drm(dev)->device);
 }
 
-int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
-int nouveau_drm_resume(struct pci_dev *);
+int nouveau_pmops_suspend(struct device *);
+int nouveau_pmops_resume(struct device *);
 
 #define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
 #define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
index 6a17bf2..d0d95bd 100644 (file)
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
 /* nouveau_dp.c */
 bool nouveau_dp_detect(struct drm_encoder *);
 void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
-                    struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
+                    struct nouveau_object *);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_output *);
-void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_output *);
-
 
 #endif /* __NOUVEAU_ENCODER_H__ */
index 5e2f521..8bf695c 100644 (file)
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               ret = nouveau_bo_validate(nvbo, true, false, false);
+               ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644 (file)
index 2c672ce..0000000
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drm.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-
-static bool
-hdmi_sor(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       if (nv_device(drm->device)->chipset <  0xa3 ||
-           nv_device(drm->device)->chipset == 0xaa ||
-           nv_device(drm->device)->chipset == 0xac)
-               return false;
-       return true;
-}
-
-static inline u32
-hdmi_base(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-       if (!hdmi_sor(encoder))
-               return 0x616500 + (nv_crtc->index * 0x800);
-       return 0x61c500 + (nv_encoder->or * 0x800);
-}
-
-static void
-hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       nv_wr32(device, hdmi_base(encoder) + reg, val);
-}
-
-static u32
-hdmi_rd32(struct drm_encoder *encoder, u32 reg)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       return nv_rd32(device, hdmi_base(encoder) + reg);
-}
-
-static u32
-hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
-{
-       u32 tmp = hdmi_rd32(encoder, reg);
-       hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
-       return tmp;
-}
-
-static void
-nouveau_audio_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       u32 or = nv_encoder->or * 0x800;
-
-       if (hdmi_sor(encoder))
-               nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
-}
-
-static void
-nouveau_audio_mode_set(struct drm_encoder *encoder,
-                      struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_connector *nv_connector;
-       u32 or = nv_encoder->or * 0x800;
-       int i;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_monitor_audio(nv_connector->edid)) {
-               nouveau_audio_disconnect(encoder);
-               return;
-       }
-
-       if (hdmi_sor(encoder)) {
-               nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
-
-               drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-               if (nv_connector->base.eld[0]) {
-                       u8 *eld = nv_connector->base.eld;
-                       for (i = 0; i < eld[2] * 4; i++)
-                               nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
-                       for (i = eld[2] * 4; i < 0x60; i++)
-                               nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
-                       nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
-               }
-       }
-}
-
-static void
-nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
-{
-       /* calculate checksum for the infoframe */
-       u8 sum = 0, i;
-       for (i = 0; i < frame[2]; i++)
-               sum += frame[i];
-       frame[3] = 256 - sum;
-
-       /* disable infoframe, and write header */
-       hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
-       hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
-
-       /* register scans tell me the audio infoframe has only one set of
-        * subpack regs; according to tegra (gee nvidia, it'd be nice if we
-        * could get those docs too!), the hdmi block pads out the rest of
-        * the packet on its own.
-        */
-       if (ctrl == 0x020)
-               frame[2] = 6;
-
-       /* write out checksum and data, in weird 7-byte register pairs */
-       for (i = 0; i < frame[2] + 1; i += 7) {
-               u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
-               u32 *subpack = (u32 *)&frame[3 + i];
-               hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
-               hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
-       }
-
-       /* enable the infoframe */
-       hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
-}
-
-static void
-nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
-                            struct drm_display_mode *mode)
-{
-       const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
-       const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
-       const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
-       u8 frame[20];
-
-       frame[0x00] = 0x82; /* AVI infoframe */
-       frame[0x01] = 0x02; /* version */
-       frame[0x02] = 0x0d; /* length */
-       frame[0x03] = 0x00;
-       frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
-       frame[0x05] = (C << 6) | (M << 4) | R;
-       frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
-       frame[0x07] = VIC;
-       frame[0x08] = PR;
-       frame[0x09] = bar_top & 0xff;
-       frame[0x0a] = bar_top >> 8;
-       frame[0x0b] = bar_bottom & 0xff;
-       frame[0x0c] = bar_bottom >> 8;
-       frame[0x0d] = bar_left & 0xff;
-       frame[0x0e] = bar_left >> 8;
-       frame[0x0f] = bar_right & 0xff;
-       frame[0x10] = bar_right >> 8;
-       frame[0x11] = 0x00;
-       frame[0x12] = 0x00;
-       frame[0x13] = 0x00;
-
-       nouveau_hdmi_infoframe(encoder, 0x020, frame);
-}
-
-static void
-nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
-                            struct drm_display_mode *mode)
-{
-       const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
-       const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
-       u8 frame[12];
-
-       frame[0x00] = 0x84;     /* Audio infoframe */
-       frame[0x01] = 0x01;     /* version */
-       frame[0x02] = 0x0a;     /* length */
-       frame[0x03] = 0x00;
-       frame[0x04] = (CT << 4) | CC;
-       frame[0x05] = (SF << 2) | ceaSS;
-       frame[0x06] = FMT;
-       frame[0x07] = CA;
-       frame[0x08] = (DM_INH << 7) | (LSV << 3);
-       frame[0x09] = 0x00;
-       frame[0x0a] = 0x00;
-       frame[0x0b] = 0x00;
-
-       nouveau_hdmi_infoframe(encoder, 0x000, frame);
-}
-
-static void
-nouveau_hdmi_disconnect(struct drm_encoder *encoder)
-{
-       nouveau_audio_disconnect(encoder);
-
-       /* disable audio and avi infoframes */
-       hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
-       hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
-
-       /* disable hdmi */
-       hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
-}
-
-void
-nouveau_hdmi_mode_set(struct drm_encoder *encoder,
-                     struct drm_display_mode *mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-       u32 max_ac_packet, rekey;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!mode || !nv_connector || !nv_connector->edid ||
-           !drm_detect_hdmi_monitor(nv_connector->edid)) {
-               nouveau_hdmi_disconnect(encoder);
-               return;
-       }
-
-       nouveau_hdmi_video_infoframe(encoder, mode);
-       nouveau_hdmi_audio_infoframe(encoder, mode);
-
-       hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
-       hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
-       hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
-
-       nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
-       nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
-       nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
-
-       /* value matches nvidia binary driver, and tegra constant */
-       rekey = 56;
-
-       max_ac_packet  = mode->htotal - mode->hdisplay;
-       max_ac_packet -= rekey;
-       max_ac_packet -= 18; /* constant from tegra */
-       max_ac_packet /= 32;
-
-       /* enable hdmi */
-       hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
-                                             0x1f000000 | /* unknown */
-                                             max_ac_packet << 16 |
-                                             rekey);
-
-       nouveau_audio_mode_set(encoder, mode);
-}
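
The checksum step in the deleted nouveau_hdmi_infoframe() chooses frame[3] so that the bytes it covers sum to zero modulo 256. A runnable model of just that step, seeded with the AVI header bytes from the deleted code above:

#include <stdint.h>
#include <stdio.h>

/* frame[2] is the payload length; the loop mirrors the deleted code. */
static void infoframe_checksum(uint8_t *frame)
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < frame[2]; i++)
		sum += frame[i];
	frame[3] = (uint8_t)(256 - sum);
}

int main(void)
{
	uint8_t avi[20] = { 0x82, 0x02, 0x0d };	/* AVI infoframe header */
	uint8_t check = 0;
	int i;

	infoframe_checksum(avi);
	for (i = 0; i < avi[2]; i++)
		check += avi[i];
	printf("checksum byte 0x%02x, sum mod 256 = %u\n", avi[3], check);
	return 0;
}
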
index 1d8cb50..1303680 100644 (file)
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
                return IRQ_NONE;
 
        nv_subdev(pmc)->intr(nv_subdev(pmc));
-
-       if (dev->mode_config.num_crtc) {
-               if (device->card_type >= NV_D0) {
-                       if (nv_rd32(device, 0x000100) & 0x04000000)
-                               nvd0_display_intr(dev);
-               } else
-               if (device->card_type >= NV_50) {
-                       if (nv_rd32(device, 0x000100) & 0x04000000)
-                               nv50_display_intr(dev);
-               }
-       }
-
        return IRQ_HANDLED;
 }
 
index 366462c..3543fec 100644 (file)
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
                return ret;
        nvbo = *pnvbo;
 
-       /* we restrict allowed domains on nv50+ to only the types
-        * that were requested at creation time.  not possibly on
-        * earlier chips without busting the ABI.
-        */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
        nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
        if (!nvbo->gem) {
index 6f0ac64..25d3495 100644 (file)
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                             enum vga_switcheroo_state state)
 {
        struct drm_device *dev = pci_get_drvdata(pdev);
-       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               nouveau_drm_resume(pdev);
+               nouveau_pmops_resume(&pdev->dev);
                drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
-               nouveau_drm_suspend(pdev, pmm);
+               nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
 }
index 82a0d9c..6578cd2 100644 (file)
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
        drm_crtc_cleanup(crtc);
 
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+       nouveau_bo_unpin(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        kfree(nv_crtc);
 }
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
                             0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
        if (!ret) {
                ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
+               if (!ret) {
                        ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        }
index 846050f..2cd6fb8 100644 (file)
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
        struct nv04_display *disp;
        int i, ret;
 
-       NV_DEBUG(drm, "\n");
-
        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
        if (!disp)
                return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
 void
 nv04_display_destroy(struct drm_device *dev)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
        struct nv04_display *disp = nv04_display(dev);
        struct drm_encoder *encoder;
        struct drm_crtc *crtc;
 
-       NV_DEBUG(drm, "\n");
-
        /* Turn every CRTC off. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct drm_mode_set modeset = {
index ce752bf..7ae7f97 100644 (file)
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
 {
        struct nv10_fence_priv *priv = drm->fence;
        nouveau_bo_unmap(priv->bo);
+       if (priv->bo)
+               nouveau_bo_unpin(priv->bo);
        nouveau_bo_ref(NULL, &priv->bo);
        drm->fence = NULL;
        kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
                                     0, 0x0000, NULL, &priv->bo);
                if (!ret) {
                        ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-                       if (!ret)
+                       if (!ret) {
                                ret = nouveau_bo_map(priv->bo);
+                               if (ret)
+                                       nouveau_bo_unpin(priv->bo);
+                       }
                        if (ret)
                                nouveau_bo_ref(NULL, &priv->bo);
                }
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b636..2ca276a 100644
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
                break;
        }
 
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                                         conf->tv_subconnector_property,
                                         tv_enc->subconnector);
 
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 
        drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_select_subconnector_property,
                                        tv_enc->select_subconnector);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_subconnector_property,
                                        tv_enc->subconnector);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_mode_property,
                                        tv_enc->tv_norm);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_flicker_reduction_property,
                                        tv_enc->flicker);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_saturation_property,
                                        tv_enc->saturation);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_hue_property,
                                        tv_enc->hue);
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                        conf->tv_overscan_property,
                                        tv_enc->overscan);
 
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77..0000000
+++ /dev/null
@@ -1,764 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_hw.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-#include <subdev/clock.h>
-
-static void
-nv50_crtc_lut_load(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-       int i;
-
-       NV_DEBUG(drm, "\n");
-
-       for (i = 0; i < 256; i++) {
-               writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
-               writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
-               writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
-       }
-
-       if (nv_crtc->lut.depth == 30) {
-               writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
-               writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
-               writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
-       }
-}
-
-int
-nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int index = nv_crtc->index, ret;
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-       NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
-
-       if (blanked) {
-               nv_crtc->cursor.hide(nv_crtc, false);
-
-               ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
-               if (ret) {
-                       NV_ERROR(drm, "no space while blanking crtc\n");
-                       return ret;
-               }
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-               OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
-               OUT_RING(evo, 0);
-               if (nv_device(drm->device)->chipset != 0x50) {
-                       BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-                       OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
-               }
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-               OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
-       } else {
-               if (nv_crtc->cursor.visible)
-                       nv_crtc->cursor.show(nv_crtc, false);
-               else
-                       nv_crtc->cursor.hide(nv_crtc, false);
-
-               ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
-               if (ret) {
-                       NV_ERROR(drm, "no space while unblanking crtc\n");
-                       return ret;
-               }
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
-               OUT_RING(evo, nv_crtc->lut.depth == 8 ?
-                               NV50_EVO_CRTC_CLUT_MODE_OFF :
-                               NV50_EVO_CRTC_CLUT_MODE_ON);
-               OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
-               if (nv_device(drm->device)->chipset != 0x50) {
-                       BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
-                       OUT_RING(evo, NvEvoVRAM);
-               }
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
-               OUT_RING(evo, nv_crtc->fb.offset >> 8);
-               OUT_RING(evo, 0);
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-               if (nv_device(drm->device)->chipset != 0x50)
-                       if (nv_crtc->fb.tile_flags == 0x7a00 ||
-                           nv_crtc->fb.tile_flags == 0xfe00)
-                               OUT_RING(evo, NvEvoFB32);
-                       else
-                       if (nv_crtc->fb.tile_flags == 0x7000)
-                               OUT_RING(evo, NvEvoFB16);
-                       else
-                               OUT_RING(evo, NvEvoVRAM_LP);
-               else
-                       OUT_RING(evo, NvEvoVRAM_LP);
-       }
-
-       nv_crtc->fb.blanked = blanked;
-       return 0;
-}
-
-static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
-       struct nouveau_connector *nv_connector;
-       struct drm_connector *connector;
-       int head = nv_crtc->index, ret;
-       u32 mode = 0x00;
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       connector = &nv_connector->base;
-       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-                       mode = DITHERING_MODE_DYNAMIC2X2;
-       } else {
-               mode = nv_connector->dithering_mode;
-       }
-
-       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-               if (connector->display_info.bpc >= 8)
-                       mode |= DITHERING_DEPTH_8BPC;
-       } else {
-               mode |= nv_connector->dithering_depth;
-       }
-
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
-               OUT_RING  (evo, mode);
-               if (update) {
-                       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-                       OUT_RING  (evo, 0);
-                       FIRE_RING (evo);
-               }
-       }
-
-       return ret;
-}
-
-static int
-nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-       int adj;
-       u32 hue, vib;
-
-       NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
-                    nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
-
-       ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
-       if (ret) {
-               NV_ERROR(drm, "no space while setting color vibrance\n");
-               return ret;
-       }
-
-       adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
-       vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
-
-       hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
-       OUT_RING  (evo, (hue << 20) | (vib << 8));
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING  (evo, 0);
-               FIRE_RING (evo);
-       }
-
-       return 0;
-}
-
-struct nouveau_connector *
-nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct drm_connector *connector;
-       struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
-
-       /* The safest approach is to find an encoder with the right crtc, that
-        * is also linked to a connector. */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder)
-                       if (connector->encoder->crtc == crtc)
-                               return nouveau_connector(connector);
-       }
-
-       return NULL;
-}
-
-static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_connector *nv_connector;
-       struct drm_crtc *crtc = &nv_crtc->base;
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_display_mode *umode = &crtc->mode;
-       struct drm_display_mode *omode;
-       int scaling_mode, ret;
-       u32 ctrl = 0, oX, oY;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (!nv_connector || !nv_connector->native_mode) {
-               NV_ERROR(drm, "no native mode, forcing panel scaling\n");
-               scaling_mode = DRM_MODE_SCALE_NONE;
-       } else {
-               scaling_mode = nv_connector->scaling_mode;
-       }
-
-       /* start off at the resolution we programmed the crtc for, this
-        * effectively handles NONE/FULL scaling
-        */
-       if (scaling_mode != DRM_MODE_SCALE_NONE)
-               omode = nv_connector->native_mode;
-       else
-               omode = umode;
-
-       oX = omode->hdisplay;
-       oY = omode->vdisplay;
-       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-               oY *= 2;
-
-       /* add overscan compensation if necessary, will keep the aspect
-        * ratio the same as the backend mode unless overridden by the
-        * user setting both hborder and vborder properties.
-        */
-       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-                            (nv_connector->underscan == UNDERSCAN_AUTO &&
-                             nv_connector->edid &&
-                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
-               u32 bX = nv_connector->underscan_hborder;
-               u32 bY = nv_connector->underscan_vborder;
-               u32 aspect = (oY << 19) / oX;
-
-               if (bX) {
-                       oX -= (bX * 2);
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       oX -= (oX >> 4) + 32;
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-       }
-
-       /* handle CENTER/ASPECT scaling, taking into account the areas
-        * removed already for overscan compensation
-        */
-       switch (scaling_mode) {
-       case DRM_MODE_SCALE_CENTER:
-               oX = min((u32)umode->hdisplay, oX);
-               oY = min((u32)umode->vdisplay, oY);
-               /* fall-through */
-       case DRM_MODE_SCALE_ASPECT:
-               if (oY < oX) {
-                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-               break;
-       default:
-               break;
-       }
-
-       if (umode->hdisplay != oX || umode->vdisplay != oY ||
-           umode->flags & DRM_MODE_FLAG_INTERLACE ||
-           umode->flags & DRM_MODE_FLAG_DBLSCAN)
-               ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
-
-       ret = RING_SPACE(evo, 5);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
-       OUT_RING  (evo, ctrl);
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
-       OUT_RING  (evo, oY << 16 | oX);
-       OUT_RING  (evo, oY << 16 | oX);
-
-       if (update) {
-               nv50_display_flip_stop(crtc);
-               nv50_display_sync(dev);
-               nv50_display_flip_next(crtc, crtc->fb, NULL);
-       }
-
-       return 0;
-}
-
-int
-nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_clock *clk = nouveau_clock(device);
-
-       return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
-}
-
-static void
-nv50_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-
-       NV_DEBUG(drm, "\n");
-
-       nouveau_bo_unmap(nv_crtc->lut.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       drm_crtc_cleanup(&nv_crtc->base);
-       kfree(nv_crtc);
-}
-
-int
-nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-                    uint32_t buffer_handle, uint32_t width, uint32_t height)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_bo *cursor = NULL;
-       struct drm_gem_object *gem;
-       int ret = 0, i;
-
-       if (!buffer_handle) {
-               nv_crtc->cursor.hide(nv_crtc, true);
-               return 0;
-       }
-
-       if (width != 64 || height != 64)
-               return -EINVAL;
-
-       gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
-       if (!gem)
-               return -ENOENT;
-       cursor = nouveau_gem_object(gem);
-
-       ret = nouveau_bo_map(cursor);
-       if (ret)
-               goto out;
-
-       /* The simple will do for now. */
-       for (i = 0; i < 64 * 64; i++)
-               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
-
-       nouveau_bo_unmap(cursor);
-
-       nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
-       nv_crtc->cursor.show(nv_crtc, true);
-
-out:
-       drm_gem_object_unreference_unlocked(gem);
-       return ret;
-}
-
-int
-nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       nv_crtc->cursor.set_pos(nv_crtc, x, y);
-       return 0;
-}
-
-static void
-nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                   uint32_t start, uint32_t size)
-{
-       int end = (start + size > 256) ? 256 : start + size, i;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       for (i = start; i < end; i++) {
-               nv_crtc->lut.r[i] = r[i];
-               nv_crtc->lut.g[i] = g[i];
-               nv_crtc->lut.b[i] = b[i];
-       }
-
-       /* We need to know the depth before we upload, but it's possible to
-        * get called before a framebuffer is bound.  If this is the case,
-        * mark the lut values as dirty by setting depth==0, and it'll be
-        * uploaded on the first mode_set_base()
-        */
-       if (!nv_crtc->base.fb) {
-               nv_crtc->lut.depth = 0;
-               return;
-       }
-
-       nv50_crtc_lut_load(crtc);
-}
-
-static void
-nv50_crtc_save(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_crtc_restore(struct drm_crtc *crtc)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static const struct drm_crtc_funcs nv50_crtc_funcs = {
-       .save = nv50_crtc_save,
-       .restore = nv50_crtc_restore,
-       .cursor_set = nv50_crtc_cursor_set,
-       .cursor_move = nv50_crtc_cursor_move,
-       .gamma_set = nv50_crtc_gamma_set,
-       .set_config = drm_crtc_helper_set_config,
-       .page_flip = nouveau_crtc_page_flip,
-       .destroy = nv50_crtc_destroy,
-};
-
-static void
-nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nv50_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       nv50_display_flip_stop(crtc);
-       drm_vblank_pre_modeset(dev, nv_crtc->index);
-       nv50_crtc_blank(nv_crtc, true);
-}
-
-static void
-nv50_crtc_commit(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       nv50_crtc_blank(nv_crtc, false);
-       drm_vblank_post_modeset(dev, nv_crtc->index);
-       nv50_display_sync(dev);
-       nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-                    struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static int
-nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
-                          struct drm_framebuffer *passed_fb,
-                          int x, int y, bool atomic)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct drm_framebuffer *drm_fb;
-       struct nouveau_framebuffer *fb;
-       int ret;
-
-       NV_DEBUG(drm, "index %d\n", nv_crtc->index);
-
-       /* no fb bound */
-       if (!atomic && !crtc->fb) {
-               NV_DEBUG(drm, "No FB bound\n");
-               return 0;
-       }
-
-       /* If atomic, we want to switch to the fb we were passed, so
-        * now we update pointers to do that.  (We don't pin; just
-        * assume we're already pinned and update the base address.)
-        */
-       if (atomic) {
-               drm_fb = passed_fb;
-               fb = nouveau_framebuffer(passed_fb);
-       } else {
-               drm_fb = crtc->fb;
-               fb = nouveau_framebuffer(crtc->fb);
-               /* If not atomic, we can go ahead and pin, and unpin the
-                * old fb we were passed.
-                */
-               ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-               if (ret)
-                       return ret;
-
-               if (passed_fb) {
-                       struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-                       nouveau_bo_unpin(ofb->nvbo);
-               }
-       }
-
-       nv_crtc->fb.offset = fb->nvbo->bo.offset;
-       nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
-       nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
-       if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
-               ret = RING_SPACE(evo, 2);
-               if (ret)
-                       return ret;
-
-               BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-               OUT_RING  (evo, fb->r_dma);
-       }
-
-       ret = RING_SPACE(evo, 12);
-       if (ret)
-               return ret;
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
-       OUT_RING  (evo, nv_crtc->fb.offset >> 8);
-       OUT_RING  (evo, 0);
-       OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
-       OUT_RING  (evo, fb->r_pitch);
-       OUT_RING  (evo, fb->r_format);
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
-       OUT_RING  (evo, fb->base.depth == 8 ?
-                  NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
-
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
-       OUT_RING  (evo, (y << 16) | x);
-
-       if (nv_crtc->lut.depth != fb->base.depth) {
-               nv_crtc->lut.depth = fb->base.depth;
-               nv50_crtc_lut_load(crtc);
-       }
-
-       return 0;
-}
-
-static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-                  struct drm_display_mode *mode, int x, int y,
-                  struct drm_framebuffer *old_fb)
-{
-       struct drm_device *dev = crtc->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 head = nv_crtc->index * 0x400;
-       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-       u32 vblan2e = 0, vblan2s = 1;
-       int ret;
-
-       /* hw timing description looks like this:
-        *
-        * <sync> <back porch> <---------display---------> <front porch>
-        * ______
-        *       |____________|---------------------------|____________|
-        *
-        *       ^ synce      ^ blanke                    ^ blanks     ^ active
-        *
-        * interlaced modes also have 2 additional values pointing at the end
-        * and start of the next field's blanking period.
-        */
-
-       hactive = mode->htotal;
-       hsynce  = mode->hsync_end - mode->hsync_start - 1;
-       hbackp  = mode->htotal - mode->hsync_end;
-       hblanke = hsynce + hbackp;
-       hfrontp = mode->hsync_start - mode->hdisplay;
-       hblanks = mode->htotal - hfrontp - 1;
-
-       vactive = mode->vtotal * vscan / ilace;
-       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-       vblanke = vsynce + vbackp;
-       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-       vblanks = vactive - vfrontp - 1;
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vblan2e = vactive + vsynce + vbackp;
-               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-               vactive = (vactive * 2) + 1;
-       }
-
-       ret = RING_SPACE(evo, 18);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, 0x0804 + head, 2);
-               OUT_RING  (evo, 0x00800000 | mode->clock);
-               OUT_RING  (evo, (ilace == 2) ? 2 : 0);
-               BEGIN_NV04(evo, 0, 0x0810 + head, 6);
-               OUT_RING  (evo, 0x00000000); /* border colour */
-               OUT_RING  (evo, (vactive << 16) | hactive);
-               OUT_RING  (evo, ( vsynce << 16) | hsynce);
-               OUT_RING  (evo, (vblanke << 16) | hblanke);
-               OUT_RING  (evo, (vblanks << 16) | hblanks);
-               OUT_RING  (evo, (vblan2e << 16) | vblan2s);
-               BEGIN_NV04(evo, 0, 0x082c + head, 1);
-               OUT_RING  (evo, 0x00000000);
-               BEGIN_NV04(evo, 0, 0x0900 + head, 1);
-               OUT_RING  (evo, 0x00000311); /* makes sync channel work */
-               BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
-               OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
-               BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
-               OUT_RING  (evo, 0x00000000); /* screen position */
-       }
-
-       nv_crtc->set_dither(nv_crtc, false);
-       nv_crtc->set_scale(nv_crtc, false);
-       nv_crtc->set_color_vibrance(nv_crtc, false);
-
-       return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-}
-
-static int
-nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                       struct drm_framebuffer *old_fb)
-{
-       int ret;
-
-       nv50_display_flip_stop(crtc);
-       ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
-       if (ret)
-               return ret;
-
-       ret = nv50_display_sync(crtc->dev);
-       if (ret)
-               return ret;
-
-       return nv50_display_flip_next(crtc, crtc->fb, NULL);
-}
-
-static int
-nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int x, int y, enum mode_set_atomic state)
-{
-       int ret;
-
-       nv50_display_flip_stop(crtc);
-       ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
-       if (ret)
-               return ret;
-
-       return nv50_display_sync(crtc->dev);
-}
-
-static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
-       .dpms = nv50_crtc_dpms,
-       .prepare = nv50_crtc_prepare,
-       .commit = nv50_crtc_commit,
-       .mode_fixup = nv50_crtc_mode_fixup,
-       .mode_set = nv50_crtc_mode_set,
-       .mode_set_base = nv50_crtc_mode_set_base,
-       .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
-       .load_lut = nv50_crtc_lut_load,
-};
-
-int
-nv50_crtc_create(struct drm_device *dev, int index)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_crtc *nv_crtc = NULL;
-       int ret, i;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-       if (!nv_crtc)
-               return -ENOMEM;
-
-       nv_crtc->index = index;
-       nv_crtc->set_dither = nv50_crtc_set_dither;
-       nv_crtc->set_scale = nv50_crtc_set_scale;
-       nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
-       nv_crtc->color_vibrance = 50;
-       nv_crtc->vibrant_hue = 0;
-       nv_crtc->lut.depth = 0;
-       for (i = 0; i < 256; i++) {
-               nv_crtc->lut.r[i] = i << 8;
-               nv_crtc->lut.g[i] = i << 8;
-               nv_crtc->lut.b[i] = i << 8;
-       }
-
-       drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
-       drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
-       drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
-
-       ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-
-       ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       nv50_cursor_init(nv_crtc);
-out:
-       if (ret)
-               nv50_crtc_destroy(&nv_crtc->base);
-       return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da11..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-static void
-nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG(drm, "\n");
-
-       if (update && nv_crtc->cursor.visible)
-               return;
-
-       ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while unhiding cursor\n");
-               return;
-       }
-
-       if (nv_device(drm->device)->chipset != 0x50) {
-               BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-               OUT_RING(evo, NvEvoVRAM);
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-       OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
-       OUT_RING(evo, nv_crtc->cursor.offset >> 8);
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
-               nv_crtc->cursor.visible = true;
-       }
-}
-
-static void
-nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       NV_DEBUG(drm, "\n");
-
-       if (update && !nv_crtc->cursor.visible)
-               return;
-
-       ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while hiding cursor\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
-       OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
-       OUT_RING(evo, 0);
-       if (nv_device(drm->device)->chipset != 0x50) {
-               BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
-               OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
-       }
-
-       if (update) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-               FIRE_RING(evo);
-               nv_crtc->cursor.visible = false;
-       }
-}
-
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-       struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
-
-       nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
-       nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
-               ((y & 0xFFFF) << 16) | (x & 0xFFFF));
-       /* Needed to make the cursor move. */
-       nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-       if (offset == nv_crtc->cursor.offset)
-               return;
-
-       nv_crtc->cursor.offset = offset;
-       if (nv_crtc->cursor.visible) {
-               nv_crtc->cursor.visible = false;
-               nv_crtc->cursor.show(nv_crtc, true);
-       }
-}
-
-int
-nv50_cursor_init(struct nouveau_crtc *nv_crtc)
-{
-       nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
-       nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
-       nv_crtc->cursor.hide = nv50_cursor_hide;
-       nv_crtc->cursor.show = nv50_cursor_show;
-       return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a17..0000000
+++ /dev/null
@@ -1,321 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static void
-nv50_dac_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       if (!nv_encoder->crtc)
-               return;
-       nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-       NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
-
-       ret = RING_SPACE(evo, 4);
-       if (ret) {
-               NV_ERROR(drm, "no space while disconnecting DAC\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING  (evo, 0);
-       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-       OUT_RING  (evo, 0);
-
-       nv_encoder->crtc = NULL;
-}
-
-static enum drm_connector_status
-nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       enum drm_connector_status status = connector_status_disconnected;
-       uint32_t dpms_state, load_pattern, load_state;
-       int or = nv_encoder->or;
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
-       dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-               0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-       if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-                    NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-                         nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-               return status;
-       }
-
-       /* Use bios provided value if possible. */
-       if (drm->vbios.dactestval) {
-               load_pattern = drm->vbios.dactestval;
-               NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
-                         load_pattern);
-       } else {
-               load_pattern = 340;
-               NV_DEBUG(drm, "Using default load_pattern of %d\n",
-                        load_pattern);
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
-               NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
-       mdelay(45); /* give it some time to process */
-       load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
-               NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-
-       if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
-                         NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
-               status = connector_status_connected;
-
-       if (status == connector_status_connected)
-               NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
-       else
-               NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
-
-       return status;
-}
-
-static void
-nv50_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       uint32_t val;
-       int or = nv_encoder->or;
-
-       NV_DEBUG(drm, "or %d mode %d\n", or, mode);
-
-       /* wait for it to be done */
-       if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
-                    NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
-               return;
-       }
-
-       val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
-
-       if (mode != DRM_MODE_DPMS_ON)
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
-
-       switch (mode) {
-       case DRM_MODE_DPMS_STANDBY:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-               break;
-       case DRM_MODE_DPMS_SUSPEND:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-               break;
-       case DRM_MODE_DPMS_OFF:
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
-               val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
-               break;
-       default:
-               break;
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
-               NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-}
-
-static void
-nv50_dac_save(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_dac_restore(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *connector;
-
-       NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-       connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!connector) {
-               NV_ERROR(drm, "Encoder has no connector\n");
-               return false;
-       }
-
-       if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-            connector->native_mode)
-               drm_mode_copy(adjusted_mode, connector->native_mode);
-
-       return true;
-}
-
-static void
-nv50_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-       uint32_t mode_ctl = 0, mode_ctl2 = 0;
-       int ret;
-
-       NV_DEBUG(drm, "or %d type %d crtc %d\n",
-                    nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-
-       nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       if (crtc->index == 1)
-               mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
-       else
-               mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
-
-       /* Lacking a working tv-out, this is not a 100% sure. */
-       if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
-               mode_ctl |= 0x40;
-       else
-       if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
-               mode_ctl |= 0x100;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
-               mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
-
-       if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
-               mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
-
-       ret = RING_SPACE(evo, 3);
-       if (ret) {
-               NV_ERROR(drm, "no space while connecting DAC\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
-       OUT_RING(evo, mode_ctl);
-       OUT_RING(evo, mode_ctl2);
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static struct drm_crtc *
-nv50_dac_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
-       .dpms = nv50_dac_dpms,
-       .save = nv50_dac_save,
-       .restore = nv50_dac_restore,
-       .mode_fixup = nv50_dac_mode_fixup,
-       .prepare = nv50_dac_disconnect,
-       .commit = nv50_dac_commit,
-       .mode_set = nv50_dac_mode_set,
-       .get_crtc = nv50_dac_crtc_get,
-       .detect = nv50_dac_detect,
-       .disable = nv50_dac_disconnect
-};
-
-static void
-nv50_dac_destroy(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-       if (!encoder)
-               return;
-
-       NV_DEBUG(drm, "\n");
-
-       drm_encoder_cleanup(encoder);
-       kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
-       .destroy = nv50_dac_destroy,
-};
-
-int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       encoder = to_drm_encoder(nv_encoder);
-
-       nv_encoder->dcb = entry;
-       nv_encoder->or = ffs(entry->or) - 1;
-
-       drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
-                        DRM_MODE_ENCODER_DAC);
-       drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
-
-       encoder->possible_crtcs = entry->heads;
-       encoder->possible_clones = 0;
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42c..3587408 100644
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
+/*
+ * Copyright 2011 Red Hat Inc.
  *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
  *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
  *
+ * Authors: Ben Skeggs
  */
 
+#include <linux/dma-mapping.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
-
-#include "nv50_display.h"
-#include "nouveau_crtc.h"
-#include "nouveau_encoder.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
-#include "nouveau_fbcon.h"
-#include <drm/drm_crtc_helper.h>
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
 #include "nouveau_fence.h"
+#include "nv50_display.h"
 
+#include <core/client.h>
 #include <core/gpuobj.h>
-#include <subdev/timer.h>
-
-static void nv50_display_bh(unsigned long);
+#include <core/class.h>
 
-static inline int
-nv50_sor_nr(struct drm_device *dev)
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER  (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+
+#define EVO_CORE_HANDLE      (0xd1500000)
+#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
+#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
+#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) |                               \
+                             (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
+
+/******************************************************************************
+ * EVO channel
+ *****************************************************************************/
+
+struct nv50_chan {
+       struct nouveau_object *user;
+       u32 handle;
+};
+
+static int
+nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, struct nv50_chan *chan)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
+       const u32 handle = EVO_CHAN_HANDLE(bclass, head);
+       int ret;
 
-       if (device->chipset  < 0x90 ||
-           device->chipset == 0x92 ||
-           device->chipset == 0xa0)
-               return 2;
+       ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
+                                oclass, data, size, &chan->user);
+       if (ret)
+               return ret;
 
-       return 4;
+       chan->handle = handle;
+       return 0;
 }
 
-u32
-nv50_display_active_crtcs(struct drm_device *dev)
+static void
+nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 mask = 0;
-       int i;
-
-       if (device->chipset  < 0x90 ||
-           device->chipset == 0x92 ||
-           device->chipset == 0xa0) {
-               for (i = 0; i < 2; i++)
-                       mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-       } else {
-               for (i = 0; i < 4; i++)
-                       mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-       }
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       if (chan->handle)
+               nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+}
 
-       for (i = 0; i < 3; i++)
-               mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+/******************************************************************************
+ * PIO EVO channel
+ *****************************************************************************/
 
-       return mask & 3;
-}
+struct nv50_pioc {
+       struct nv50_chan base;
+};
 
-int
-nv50_display_early_init(struct drm_device *dev)
+static void
+nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
 {
-       return 0;
+       nv50_chan_destroy(core, &pioc->base);
 }
 
-void
-nv50_display_late_takedown(struct drm_device *dev)
+static int
+nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, struct nv50_pioc *pioc)
 {
+       return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
 }
 
-int
-nv50_display_sync(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       int ret;
-
-       ret = RING_SPACE(evo, 6);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x80000000);
-               BEGIN_NV04(evo, 0, 0x0080, 1);
-               OUT_RING  (evo, 0);
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000000);
+/******************************************************************************
+ * DMA EVO channel
+ *****************************************************************************/
 
-               nv_wo32(disp->ramin, 0x2000, 0x00000000);
-               FIRE_RING (evo);
+struct nv50_dmac {
+       struct nv50_chan base;
+       dma_addr_t handle;
+       u32 *ptr;
+};
 
-               if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
-                       return 0;
+static void
+nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+{
+       if (dmac->ptr) {
+               struct pci_dev *pdev = nv_device(core)->pdev;
+               pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
        }
 
-       return 0;
+       nv50_chan_destroy(core, &dmac->base);
 }
 
-int
-nv50_display_init(struct drm_device *dev)
+static int
+nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_channel *evo;
-       int ret, i;
-       u32 val;
-
-       NV_DEBUG(drm, "\n");
-
-       nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
-
-       /*
-        * I think the 0x006101XX range is some kind of main control area
-        * that enables things.
-        */
-       /* CRTC? */
-       for (i = 0; i < 2; i++) {
-               val = nv_rd32(device, 0x00616100 + (i * 0x800));
-               nv_wr32(device, 0x00610190 + (i * 0x10), val);
-               val = nv_rd32(device, 0x00616104 + (i * 0x800));
-               nv_wr32(device, 0x00610194 + (i * 0x10), val);
-               val = nv_rd32(device, 0x00616108 + (i * 0x800));
-               nv_wr32(device, 0x00610198 + (i * 0x10), val);
-               val = nv_rd32(device, 0x0061610c + (i * 0x800));
-               nv_wr32(device, 0x0061019c + (i * 0x10), val);
-       }
-
-       /* DAC */
-       for (i = 0; i < 3; i++) {
-               val = nv_rd32(device, 0x0061a000 + (i * 0x800));
-               nv_wr32(device, 0x006101d0 + (i * 0x04), val);
-       }
-
-       /* SOR */
-       for (i = 0; i < nv50_sor_nr(dev); i++) {
-               val = nv_rd32(device, 0x0061c000 + (i * 0x800));
-               nv_wr32(device, 0x006101e0 + (i * 0x04), val);
-       }
-
-       /* EXT */
-       for (i = 0; i < 3; i++) {
-               val = nv_rd32(device, 0x0061e000 + (i * 0x800));
-               nv_wr32(device, 0x006101f0 + (i * 0x04), val);
-       }
-
-       for (i = 0; i < 3; i++) {
-               nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
-                       NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
-               nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
-       }
-
-       /* The precise purpose is unknown, i suspect it has something to do
-        * with text mode.
-        */
-       if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
-               nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
-               nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
-               if (!nv_wait(device, 0x006194e8, 2, 0)) {
-                       NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
-                       NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
-                                               nv_rd32(device, 0x6194e8));
-                       return -EBUSY;
-               }
-       }
-
-       for (i = 0; i < 2; i++) {
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-                       NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-                       NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-                       return -EBUSY;
-               }
-
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                       NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
-                       NV_ERROR(drm, "timeout: "
-                                     "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
-                       NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-                       return -EBUSY;
-               }
-       }
-
-       nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
-                    NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
-
-       ret = nv50_evo_init(dev);
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE |
+                                                NV50_DMA_CONF0_PART_256,
+                                    }, sizeof(struct nv_dma_class), &object);
        if (ret)
                return ret;
-       evo = nv50_display(dev)->master;
-
-       nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
 
-       ret = RING_SPACE(evo, 3);
+       ret = nouveau_object_new(client, parent, NvEvoFB16,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
+                                                NV50_DMA_CONF0_PART_256,
+                                }, sizeof(struct nv_dma_class), &object);
        if (ret)
                return ret;
-       BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
-       OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
-       OUT_RING  (evo, NvEvoSync);
 
-       return nv50_display_sync(dev);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
+                                                NV50_DMA_CONF0_PART_256,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
 }
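+
+/* Each generation gets an NvEvoVRAM_LP view plus format-specific
+ * framebuffer ctxdmas (NvEvoFB16/NvEvoFB32); the magic conf0 bits
+ * (0x70/0x7a here, 0xfe on nvc0/nvd0) presumably select the tiling
+ * layout scanout expects for 16bpp and 32bpp surfaces.  Note the nvd0
+ * variant below only exposes the 32bpp view.
+ */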
 
-void
-nv50_display_fini(struct drm_device *dev)
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo = disp->master;
-       struct drm_crtc *drm_crtc;
-       int ret, i;
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE,
+                                    }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       NV_DEBUG(drm, "\n");
+       ret = nouveau_object_new(client, parent, NvEvoFB16,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
+}
 
-               nv50_crtc_blank(crtc, true);
-       }
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+                                    NV_DMA_IN_MEMORY_CLASS,
+                                    &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVD0_DMA_CONF0_ENABLE |
+                                                NVD0_DMA_CONF0_PAGE_LP,
+                                    }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       ret = RING_SPACE(evo, 2);
-       if (ret == 0) {
-               BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-               OUT_RING(evo, 0);
-       }
-       FIRE_RING(evo);
+       ret = nouveau_object_new(client, parent, NvEvoFB32,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                       .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+                                                NVD0_DMA_CONF0_PAGE_LP,
+                                }, sizeof(struct nv_dma_class), &object);
+       return ret;
+}
 
-       /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
-        * cleaning up?
-        */
-       list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-               struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-               uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+                void *data, u32 size, u64 syncbuf,
+                struct nv50_dmac *dmac)
+{
+       struct nouveau_fb *pfb = nouveau_fb(core);
+       struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+       struct nouveau_object *object;
+       u32 pushbuf = *(u32 *)data;
+       int ret;
 
-               if (!crtc->base.enabled)
-                       continue;
+       dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+                                       &dmac->handle);
+       if (!dmac->ptr)
+               return -ENOMEM;
 
-               nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
-               if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
-                       NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
-                                     "0x%08x\n", mask, mask);
-                       NV_ERROR(drm, "0x610024 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_INTR_1));
-               }
-       }
+       ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+                                NV_DMA_FROM_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_PCI_US |
+                                                NV_DMA_ACCESS_RD,
+                                       .start = dmac->handle + 0x0000,
+                                       .limit = dmac->handle + 0x0fff,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < 2; i++) {
-               nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
-               if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-                            NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-                       NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-                       NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-                                nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-               }
-       }
+       ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+       if (ret)
+               return ret;
 
-       nv50_evo_fini(dev);
+       ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = syncbuf + 0x0000,
+                                       .limit = syncbuf + 0x0fff,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < 3; i++) {
-               if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
-                            NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-                       NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
-                       NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
-                                 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
-               }
-       }
+       ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+                                NV_DMA_IN_MEMORY_CLASS,
+                                &(struct nv_dma_class) {
+                                       .flags = NV_DMA_TARGET_VRAM |
+                                                NV_DMA_ACCESS_RDWR,
+                                       .start = 0,
+                                       .limit = pfb->ram.size - 1,
+                                }, sizeof(struct nv_dma_class), &object);
+       if (ret)
+               return ret;
 
-       /* disable interrupts. */
-       nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+       if (nv_device(core)->card_type < NV_C0)
+               ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+       else
+       if (nv_device(core)->card_type < NV_D0)
+               ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+       else
+               ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+       return ret;
 }
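+
+/* nv50_dmac_create() above: allocate a page for the pushbuffer, wrap it
+ * in a read-only PCI ctxdma so the channel can fetch from it, create the
+ * channel itself, then hang the sync-area and whole-of-VRAM ctxdmas off
+ * it before adding the per-generation framebuffer ctxdmas.
+ */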
 
-int
-nv50_display_create(struct drm_device *dev)
+struct nv50_mast {
+       struct nv50_dmac base;
+};
+
+struct nv50_curs {
+       struct nv50_pioc base;
+};
+
+struct nv50_sync {
+       struct nv50_dmac base;
+       struct {
+               u32 offset;
+               u16 value;
+       } sem;
+};
+
+struct nv50_ovly {
+       struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+       struct nv50_pioc base;
+};
+
+struct nv50_head {
+       struct nouveau_crtc base;
+       struct nv50_curs curs;
+       struct nv50_sync sync;
+       struct nv50_ovly ovly;
+       struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
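+
+/* nv50_vers() evaluates to the class of a channel's user object; the code
+ * below branches on it (NV50 vs NV84 vs NVD0 vs NVE0 display classes)
+ * instead of on chipset numbers.
+ */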
+
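+/* Per-device display state: the "core" object, the master EVO channel,
+ * and the buffer used both for EVO notifiers and the page-flip semaphores
+ * (see nv50_display_crtc_sema() below).
+ */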
+struct nv50_disp {
+       struct nouveau_object *core;
+       struct nv50_mast mast;
+
+       u32 modeset;
+
+       struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct dcb_table *dcb = &drm->vbios.dcb;
-       struct drm_connector *connector, *ct;
-       struct nv50_display *priv;
-       int ret, i;
-
-       NV_DEBUG(drm, "\n");
+       return nouveau_display(dev)->priv;
+}
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
+#define nv50_mast(d) (&nv50_disp(d)->mast)
 
-       nouveau_display(dev)->priv = priv;
-       nouveau_display(dev)->dtor = nv50_display_destroy;
-       nouveau_display(dev)->init = nv50_display_init;
-       nouveau_display(dev)->fini = nv50_display_fini;
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+       return nouveau_encoder(encoder)->crtc;
+}
 
-       /* Create CRTC objects */
-       for (i = 0; i < 2; i++) {
-               ret = nv50_crtc_create(dev, i);
-               if (ret)
-                       return ret;
-       }
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+       struct nv50_dmac *dmac = evoc;
+       u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
 
-       /* We set up the encoders from the BIOS table */
-       for (i = 0 ; i < dcb->entries; i++) {
-               struct dcb_output *entry = &dcb->entry[i];
+       if (put + nr >= (PAGE_SIZE / 4) - 8) {
+               dmac->ptr[put] = 0x20000000;
 
-               if (entry->location != DCB_LOC_ON_CHIP) {
-                       NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
-                               entry->type, ffs(entry->or) - 1);
-                       continue;
+               nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+               if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+                       NV_ERROR(dmac->base.user, "channel stalled\n");
+                       return NULL;
                }
 
-               connector = nouveau_connector_create(dev, entry->connector);
-               if (IS_ERR(connector))
-                       continue;
-
-               switch (entry->type) {
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       nv50_sor_create(connector, entry);
-                       break;
-               case DCB_OUTPUT_ANALOG:
-                       nv50_dac_create(connector, entry);
-                       break;
-               default:
-                       NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
-                       continue;
-               }
+               put = 0;
        }
 
-       list_for_each_entry_safe(connector, ct,
-                                &dev->mode_config.connector_list, head) {
-               if (!connector->encoder_ids[0]) {
-                       NV_WARN(drm, "%s has no encoders, removing\n",
-                               drm_get_connector_name(connector));
-                       connector->funcs->destroy(connector);
-               }
-       }
+       return dmac->ptr + put;
+}
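+
+/* The channel's user area mirrors its PUT (0x0000) and GET (0x0004)
+ * pointers: evo_wait() wraps by (presumably) queueing a jump back to the
+ * start of the pushbuffer (0x20000000), resetting PUT and polling until
+ * GET follows; evo_kick() below then advances PUT past whatever was
+ * written.
+ */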
 
-       tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+static void
+evo_kick(u32 *push, void *evoc)
+{
+       struct nv50_dmac *dmac = evoc;
+       nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
 
-       ret = nv50_evo_create(dev);
-       if (ret) {
-               nv50_display_destroy(dev);
-               return ret;
-       }
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d)   *((p)++) = (d)
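+
+/* An EVO method header packs the data count into bits 18+ and the method
+ * offset into the low bits, so e.g.
+ *
+ *     evo_mthd(push, 0x0080, 1);
+ *     evo_data(push, 0x00000000);
+ *
+ * queues a single-dword UPDATE (0x0080, NV50_EVO_UPDATE in the old code).
+ */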
 
-       return 0;
+static bool
+evo_sync_wait(void *data)
+{
+       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
 }
 
-void
-nv50_display_destroy(struct drm_device *dev)
+static int
+evo_sync(struct drm_device *dev)
 {
-       struct nv50_display *disp = nv50_display(dev);
+       struct nouveau_device *device = nouveau_dev(dev);
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct nv50_mast *mast = nv50_mast(dev);
+       u32 *push = evo_wait(mast, 8);
+       if (push) {
+               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+               evo_mthd(push, 0x0080, 2);
+               evo_data(push, 0x00000000);
+               evo_data(push, 0x00000000);
+               evo_kick(push, mast);
+               if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+                       return 0;
+       }
 
-       nv50_evo_destroy(dev);
-       kfree(disp);
+       return -EBUSY;
 }
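+
+/* evo_sync() points the master channel's notifier at EVO_MAST_NTFY in the
+ * shared sync buffer, kicks an UPDATE, and busy-waits for the notifier
+ * word to go non-zero, i.e. (presumably) for the core channel to have
+ * processed everything queued so far.
+ */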
 
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
 struct nouveau_bo *
 nv50_display_crtc_sema(struct drm_device *dev, int crtc)
 {
-       return nv50_display(dev)->crtc[crtc].sem.bo;
+       return nv50_disp(dev)->sync;
 }
 
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-       struct nv50_display *disp = nv50_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-       struct nouveau_channel *evo = dispc->sync;
-       int ret;
-
-       ret = RING_SPACE(evo, 8);
-       if (ret) {
-               WARN_ON(1);
-               return;
+       struct nv50_sync *sync = nv50_sync(crtc);
+       u32 *push;
+
+       push = evo_wait(sync, 8);
+       if (push) {
+               evo_mthd(push, 0x0084, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0094, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x00c0, 1);
+               evo_data(push, 0x00000000);
+               evo_mthd(push, 0x0080, 1);
+               evo_data(push, 0x00000000);
+               evo_kick(push, sync);
        }
-
-       BEGIN_NV04(evo, 0, 0x0084, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0094, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x00c0, 1);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0x00000000);
-       FIRE_RING (evo);
 }
 
 int
 nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                      struct nouveau_channel *chan)
+                      struct nouveau_channel *chan, u32 swap_interval)
 {
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-       struct nv50_display *disp = nv50_display(crtc->dev);
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-       struct nouveau_channel *evo = dispc->sync;
+       struct nv50_sync *sync = nv50_sync(crtc);
+       u32 *push;
        int ret;
 
-       ret = RING_SPACE(evo, chan ? 25 : 27);
-       if (unlikely(ret))
-               return ret;
+       swap_interval <<= 4;
+       if (swap_interval == 0)
+               swap_interval |= 0x100;
+
+       push = evo_wait(sync, 128);
+       if (unlikely(push == NULL))
+               return -EBUSY;
 
        /* synchronise with the rendering channel, if necessary */
        if (likely(chan)) {
                ret = RING_SPACE(chan, 10);
-               if (ret) {
-                       WIND_RING(evo);
+               if (ret)
                        return ret;
-               }
 
-               if (nv_device(drm->device)->chipset < 0xc0) {
-                       BEGIN_NV04(chan, 0, 0x0060, 2);
+               if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
                        OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-                       OUT_RING  (chan, dispc->sem.offset);
-                       BEGIN_NV04(chan, 0, 0x006c, 1);
-                       OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
-                       BEGIN_NV04(chan, 0, 0x0064, 2);
-                       OUT_RING  (chan, dispc->sem.offset ^ 0x10);
+                       OUT_RING  (chan, sync->sem.offset);
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+                       OUT_RING  (chan, sync->sem.offset ^ 0x10);
                        OUT_RING  (chan, 0x74b1e000);
-                       BEGIN_NV04(chan, 0, 0x0060, 1);
-                       if (nv_device(drm->device)->chipset < 0x84)
+                       BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+                       if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
                                OUT_RING  (chan, NvSema);
                        else
                                OUT_RING  (chan, chan->vram);
                } else {
                        u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
-                       offset += dispc->sem.offset;
-                       BEGIN_NVC0(chan, 0, 0x0010, 4);
+                       offset += sync->sem.offset;
+
+                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                        OUT_RING  (chan, upper_32_bits(offset));
                        OUT_RING  (chan, lower_32_bits(offset));
-                       OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
+                       OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
                        OUT_RING  (chan, 0x1002);
-                       BEGIN_NVC0(chan, 0, 0x0010, 4);
+                       BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
                        OUT_RING  (chan, upper_32_bits(offset));
                        OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
                        OUT_RING  (chan, 0x74b1e000);
                        OUT_RING  (chan, 0x1001);
                }
+
                FIRE_RING (chan);
        } else {
-               nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
-                               0xf00d0000 | dispc->sem.value);
+               nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+                               0xf00d0000 | sync->sem.value);
+               evo_sync(crtc->dev);
        }
 
-       /* queue the flip on the crtc's "display sync" channel */
-       BEGIN_NV04(evo, 0, 0x0100, 1);
-       OUT_RING  (evo, 0xfffe0000);
-       if (chan) {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000100);
+       /* queue the flip */
+       evo_mthd(push, 0x0100, 1);
+       evo_data(push, 0xfffe0000);
+       evo_mthd(push, 0x0084, 1);
+       evo_data(push, swap_interval);
+       if (!(swap_interval & 0x00000100)) {
+               evo_mthd(push, 0x00e0, 1);
+               evo_data(push, 0x40000000);
+       }
+       evo_mthd(push, 0x0088, 4);
+       evo_data(push, sync->sem.offset);
+       evo_data(push, 0xf00d0000 | sync->sem.value);
+       evo_data(push, 0x74b1e000);
+       evo_data(push, NvEvoSync);
+       evo_mthd(push, 0x00a0, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       evo_mthd(push, 0x00c0, 1);
+       evo_data(push, nv_fb->r_dma);
+       evo_mthd(push, 0x0110, 2);
+       evo_data(push, 0x00000000);
+       evo_data(push, 0x00000000);
+       if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+               evo_mthd(push, 0x0800, 5);
+               evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+               evo_data(push, 0);
+               evo_data(push, (fb->height << 16) | fb->width);
+               evo_data(push, nv_fb->r_pitch);
+               evo_data(push, nv_fb->r_format);
        } else {
-               BEGIN_NV04(evo, 0, 0x0084, 1);
-               OUT_RING  (evo, 0x00000010);
-               /* enables gamma somehow; PDISP will complain if you
-                * don't wait for vblank before changing this.
-                */
-               BEGIN_NV04(evo, 0, 0x00e0, 1);
-               OUT_RING  (evo, 0x40000000);
-       }
-       BEGIN_NV04(evo, 0, 0x0088, 4);
-       OUT_RING  (evo, dispc->sem.offset);
-       OUT_RING  (evo, 0xf00d0000 | dispc->sem.value);
-       OUT_RING  (evo, 0x74b1e000);
-       OUT_RING  (evo, NvEvoSync);
-       BEGIN_NV04(evo, 0, 0x00a0, 2);
-       OUT_RING  (evo, 0x00000000);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x00c0, 1);
-       OUT_RING  (evo, nv_fb->r_dma);
-       BEGIN_NV04(evo, 0, 0x0110, 2);
-       OUT_RING  (evo, 0x00000000);
-       OUT_RING  (evo, 0x00000000);
-       BEGIN_NV04(evo, 0, 0x0800, 5);
-       OUT_RING  (evo, nv_fb->nvbo->bo.offset >> 8);
-       OUT_RING  (evo, 0);
-       OUT_RING  (evo, (fb->height << 16) | fb->width);
-       OUT_RING  (evo, nv_fb->r_pitch);
-       OUT_RING  (evo, nv_fb->r_format);
-       BEGIN_NV04(evo, 0, 0x0080, 1);
-       OUT_RING  (evo, 0x00000000);
-       FIRE_RING (evo);
-
-       dispc->sem.offset ^= 0x10;
-       dispc->sem.value++;
+               evo_mthd(push, 0x0400, 5);
+               evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+               evo_data(push, 0);
+               evo_data(push, (fb->height << 16) | fb->width);
+               evo_data(push, nv_fb->r_pitch);
+               evo_data(push, nv_fb->r_format);
+       }
+       evo_mthd(push, 0x0080, 1);
+       evo_data(push, 0x00000000);
+       evo_kick(push, sync);
+
+       sync->sem.offset ^= 0x10;
+       sync->sem.value++;
        return 0;
 }
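+
+/* The flip handshake: the rendering channel releases 0xf00d0000|value for
+ * the sync channel's 0x0088 methods to consume, then blocks on the
+ * 0x74b1e000 value a completed flip leaves in the other semaphore slot
+ * (with no channel, the CPU writes the release value directly and
+ * evo_sync()s instead).  sem.offset toggles between the two slots (^ 0x10)
+ * and sem.value increments once per flip.
+ */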
 
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
-                          u32 mc, int pxclk)
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_connector *nv_connector = NULL;
-       struct drm_encoder *encoder;
-       struct nvbios *bios = &drm->vbios;
-       u32 script = 0, or;
-
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
-               if (nv_encoder->dcb != dcb)
-                       continue;
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct nouveau_connector *nv_connector;
+       struct drm_connector *connector;
+       u32 *push, mode = 0x00;
+
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       connector = &nv_connector->base;
+       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+                       mode = DITHERING_MODE_DYNAMIC2X2;
+       } else {
+               mode = nv_connector->dithering_mode;
+       }
 
-               nv_connector = nouveau_encoder_connector_get(nv_encoder);
-               break;
+       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+               if (connector->display_info.bpc >= 8)
+                       mode |= DITHERING_DEPTH_8BPC;
+       } else {
+               mode |= nv_connector->dithering_depth;
        }
 
-       or = ffs(dcb->or) - 1;
-       switch (dcb->type) {
-       case DCB_OUTPUT_LVDS:
-               script = (mc >> 8) & 0xf;
-               if (bios->fp_no_ddc) {
-                       if (bios->fp.dual_link)
-                               script |= 0x0100;
-                       if (bios->fp.if_is_24bit)
-                               script |= 0x0200;
+       push = evo_wait(mast, 4);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+                       evo_data(push, mode);
+               } else
+               if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+                       evo_data(push, mode);
                } else {
-                       /* determine number of lvds links */
-                       if (nv_connector && nv_connector->edid &&
-                           nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-                               /* http://www.spwg.org */
-                               if (((u8 *)nv_connector->edid)[121] == 2)
-                                       script |= 0x0100;
-                       } else
-                       if (pxclk >= bios->fp.duallink_transition_clk) {
-                               script |= 0x0100;
-                       }
-
-                       /* determine panel depth */
-                       if (script & 0x0100) {
-                               if (bios->fp.strapless_is_24bit & 2)
-                                       script |= 0x0200;
-                       } else {
-                               if (bios->fp.strapless_is_24bit & 1)
-                                       script |= 0x0200;
-                       }
+                       evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+                       evo_data(push, mode);
+               }
 
-                       if (nv_connector && nv_connector->edid &&
-                           (nv_connector->edid->revision >= 4) &&
-                           (nv_connector->edid->input & 0x70) >= 0x20)
-                               script |= 0x0200;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
                }
-               break;
-       case DCB_OUTPUT_TMDS:
-               script = (mc >> 8) & 0xf;
-               if (pxclk >= 165000)
-                       script |= 0x0100;
-               break;
-       case DCB_OUTPUT_DP:
-               script = (mc >> 8) & 0xf;
-               break;
-       case DCB_OUTPUT_ANALOG:
-               script = 0xff;
-               break;
-       default:
-               NV_ERROR(drm, "modeset on unsupported output type!\n");
-               break;
+               evo_kick(push, mast);
        }
 
-       return script;
+       return 0;
 }
 
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 unk30 = nv_rd32(device, 0x610030), mc;
-       int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       disp->irq.dcb = NULL;
-
-       nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
-
-       /* Determine which CRTC we're dealing with; only one will ever be
-        * signalled at a time with the current nouveau code.
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+       struct drm_crtc *crtc = &nv_crtc->base;
+       struct nouveau_connector *nv_connector;
+       int mode = DRM_MODE_SCALE_NONE;
+       u32 oX, oY, *push;
+
+       /* start off at the resolution we programmed the crtc for; this
+        * effectively handles NONE/FULL scaling
         */
-       crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
-       if (crtc < 0)
-               goto ack;
-
-       /* Nothing needs to be done for the encoder */
-       crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-       if (crtc < 0)
-               goto ack;
-
-       /* Find which encoder was connected to the CRTC */
-       for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-               mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-               NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_ANALOG; break;
-               case 1: type = DCB_OUTPUT_TV; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       if (nv_connector && nv_connector->native_mode)
+               mode = nv_connector->scaling_mode;
+
+       if (mode != DRM_MODE_SCALE_NONE)
+               omode = nv_connector->native_mode;
+       else
+               omode = umode;
+
+       oX = omode->hdisplay;
+       oY = omode->vdisplay;
+       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+               oY *= 2;
+
+       /* add overscan compensation if necessary; this will keep the aspect
+        * ratio the same as the backend mode unless overridden by the
+        * user setting both the hborder and vborder properties.
+        */
+       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+                            (nv_connector->underscan == UNDERSCAN_AUTO &&
+                             nv_connector->edid &&
+                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
+               u32 bX = nv_connector->underscan_hborder;
+               u32 bY = nv_connector->underscan_vborder;
+               u32 aspect = (oY << 19) / oX;
+
+               if (bX) {
+                       oX -= (bX * 2);
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       oX -= (oX >> 4) + 32;
+                       if (bY) oY -= (bY * 2);
+                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-
-               or = i;
        }
 
-       for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-               if (nv_device(drm->device)->chipset  < 0x90 ||
-                   nv_device(drm->device)->chipset == 0x92 ||
-                   nv_device(drm->device)->chipset == 0xa0)
-                       mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-               else
-                       mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-
-               NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_LVDS; break;
-               case 1: type = DCB_OUTPUT_TMDS; break;
-               case 2: type = DCB_OUTPUT_TMDS; break;
-               case 5: type = DCB_OUTPUT_TMDS; break;
-               case 8: type = DCB_OUTPUT_DP; break;
-               case 9: type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       /* handle CENTER/ASPECT scaling, taking into account the areas
+        * removed already for overscan compensation
+        */
+       switch (mode) {
+       case DRM_MODE_SCALE_CENTER:
+               oX = min((u32)umode->hdisplay, oX);
+               oY = min((u32)umode->vdisplay, oY);
+               /* fall-through */
+       case DRM_MODE_SCALE_ASPECT:
+               if (oY < oX) {
+                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
+               } else {
+                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
                }
-
-               or = i;
+               break;
+       default:
+               break;
        }
 
-       /* There was no encoder to disable */
-       if (type == DCB_OUTPUT_ANY)
-               goto ack;
+       push = evo_wait(mast, 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       /*XXX: SCALE_CTRL_ACTIVE??? */
+                       evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+               } else {
+                       evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_data(push, (oY << 16) | oX);
+                       evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+               }
 
-       /* Disable the encoder */
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
+               evo_kick(push, mast);
 
-               if (dcb->type == type && (dcb->or & (1 << or))) {
-                       nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
-                       disp->irq.dcb = dcb;
-                       goto ack;
+               if (update) {
+                       nv50_display_flip_stop(crtc);
+                       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
                }
        }
 
-       NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
-       nv_wr32(device, 0x610030, 0x80000000);
+       return 0;
 }
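+
+/* The scaler math above is .19 fixed point: aspect = (oY << 19) / oX is
+ * the ratio scaled by 2^19, and ((oX * aspect) + (aspect / 2)) >> 19
+ * multiplies it back out with round-to-nearest.
+ */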
 
-static void
-nv50_display_unk20_handler(struct drm_device *dev)
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
-       struct dcb_output *dcb;
-       int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       dcb = disp->irq.dcb;
-       if (dcb) {
-               nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
-               disp->irq.dcb = NULL;
-       }
-
-       /* CRTC clock change requested? */
-       crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
-       if (crtc >= 0) {
-               pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
-               pclk &= 0x003fffff;
-               if (pclk)
-                       nv50_crtc_set_clock(dev, crtc, pclk);
-
-               tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
-               tmp &= ~0x000000f;
-               nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
-       }
-
-       /* Nothing needs to be done for the encoder */
-       crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-       if (crtc < 0)
-               goto ack;
-       pclk  = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
-       /* Find which encoder is connected to the CRTC */
-       for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-               mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
-               NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
-
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_ANALOG; break;
-               case 1: type = DCB_OUTPUT_TV; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-                       goto ack;
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push, hue, vib;
+       int adj;
+
+       adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+       vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+       hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+       push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, (hue << 20) | (vib << 8));
+               } else {
+                       evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, (hue << 20) | (vib << 8));
                }
 
-               or = i;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
 
-       for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-               if (nv_device(drm->device)->chipset  < 0x90 ||
-                   nv_device(drm->device)->chipset == 0x92 ||
-                   nv_device(drm->device)->chipset == 0xa0)
-                       mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
-               else
-                       mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
-
-               NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-               if (!(mc & (1 << crtc)))
-                       continue;
+       return 0;
+}
 
-               switch ((mc & 0x00000f00) >> 8) {
-               case 0: type = DCB_OUTPUT_LVDS; break;
-               case 1: type = DCB_OUTPUT_TMDS; break;
-               case 2: type = DCB_OUTPUT_TMDS; break;
-               case 5: type = DCB_OUTPUT_TMDS; break;
-               case 8: type = DCB_OUTPUT_DP; break;
-               case 9: type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-                       goto ack;
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+                   int x, int y, bool update)
+{
+       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push;
+
+       push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, nvfb->nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+                       evo_data(push, (fb->height << 16) | fb->width);
+                       evo_data(push, nvfb->r_pitch);
+                       evo_data(push, nvfb->r_format);
+                       evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, (y << 16) | x);
+                       if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                               evo_data(push, nvfb->r_dma);
+                       }
+               } else {
+                       evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, nvfb->nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+                       evo_data(push, (fb->height << 16) | fb->width);
+                       evo_data(push, nvfb->r_pitch);
+                       evo_data(push, nvfb->r_format);
+                       evo_data(push, nvfb->r_dma);
+                       evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, (y << 16) | x);
                }
 
-               or = i;
+               if (update) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
 
-       if (type == DCB_OUTPUT_ANY)
-               goto ack;
+       nv_crtc->fb.tile_flags = nvfb->r_dma;
+       return 0;
+}
 
-       /* Enable the encoder */
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               dcb = &drm->vbios.dcb.entry[i];
-               if (dcb->type == type && (dcb->or & (1 << or)))
-                       break;
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM);
+               } else {
+                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, 0x85000000);
+                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, NvEvoVRAM);
+               }
+               evo_kick(push, mast);
        }
+}
 
-       if (i == drm->vbios.dcb.entries) {
-               NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-               goto ack;
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+       u32 *push = evo_wait(mast, 16);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x05000000);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x05000000);
+                       evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+               } else {
+                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x05000000);
+                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+               }
+               evo_kick(push, mast);
        }
+}
 
-       script = nv50_display_script_select(dev, dcb, mc, pclk);
-       nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
-
-       if (type == DCB_OUTPUT_DP) {
-               int link = !(dcb->dpconf.sor.link & 1);
-               if ((mc & 0x000f0000) == 0x00020000)
-                       nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
-               else
-                       nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+       struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+       if (show)
+               nv50_crtc_cursor_show(nv_crtc);
+       else
+               nv50_crtc_cursor_hide(nv_crtc);
+
+       if (update) {
+               u32 *push = evo_wait(mast, 2);
+               if (push) {
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
        }
+}
 
-       if (dcb->type != DCB_OUTPUT_ANALOG) {
-               tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
-               tmp &= ~0x00000f0f;
-               if (script & 0x0100)
-                       tmp |= 0x00000101;
-               nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
-       } else {
-               nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
-       }
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       u32 *push;
+
+       nv50_display_flip_stop(crtc);
+
+       push = evo_wait(mast, 2);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x40000000);
+               } else
+       if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x40000000);
+                       evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+               } else {
+                       evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x03000000);
+                       evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000);
+               }
 
-       disp->irq.dcb = dcb;
-       disp->irq.pclk = pclk;
-       disp->irq.script = script;
+               evo_kick(push, mast);
+       }
 
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
-       nv_wr32(device, 0x610030, 0x80000000);
+       nv50_crtc_cursor_show_hide(nv_crtc, false, false);
 }
 
-/* If programming a TMDS output on a SOR that can also be configured for
- * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
- *
- * It looks like the VBIOS TMDS scripts make an attempt at this; however,
- * the VBIOS scripts on at least one board I have switch it off only on
- * link 0, causing a blank display if the output has previously been
- * programmed for DisplayPort.
- */
 static void
-nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
+nv50_crtc_commit(struct drm_crtc *crtc)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       u32 *push;
+
+       push = evo_wait(mast, 32);
+       if (push) {
+               if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM_LP);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0xc0000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+               } else
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, nv_crtc->fb.tile_flags);
+                       evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0xc0000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+                       evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, NvEvoVRAM);
+               } else {
+                       evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, nv_crtc->fb.tile_flags);
+                       evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+                       evo_data(push, 0x83000000);
+                       evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, NvEvoVRAM);
+                       evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0xffffff00);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+                    struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+       int ret;
+
+       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+       if (ret)
+               return ret;
+
+       if (old_fb) {
+               nvfb = nouveau_framebuffer(old_fb);
+               nouveau_bo_unpin(nvfb->nvbo);
+       }
+
+       return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+                  struct drm_display_mode *mode, int x, int y,
+                  struct drm_framebuffer *old_fb)
+{
+       struct nv50_mast *mast = nv50_mast(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nouveau_connector *nv_connector;
+       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+       u32 vblan2e = 0, vblan2s = 1;
+       u32 *push;
+       int ret;
+
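+       /* derive the hardware blanking values from the DRM mode timings;
+        * horizontal values count back from htotal, vertical values are
+        * scaled for doublescan/interlace, and interlaced modes program a
+        * second vblank region (vblan2s/vblan2e) for the odd field.
+        */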
+       hactive = mode->htotal;
+       hsynce  = mode->hsync_end - mode->hsync_start - 1;
+       hbackp  = mode->htotal - mode->hsync_end;
+       hblanke = hsynce + hbackp;
+       hfrontp = mode->hsync_start - mode->hdisplay;
+       hblanks = mode->htotal - hfrontp - 1;
+
+       vactive = mode->vtotal * vscan / ilace;
+       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+       vblanke = vsynce + vbackp;
+       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       vblanks = vactive - vfrontp - 1;
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+               vblan2e = vactive + vsynce + vbackp;
+               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+               vactive = (vactive * 2) + 1;
+       }
+
+       ret = nv50_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
+
+       push = evo_wait(mast, 64);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x00800000 | mode->clock);
+                       evo_data(push, (ilace == 2) ? 2 : 0);
+                       evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, (vactive << 16) | hactive);
+                       evo_data(push, ( vsynce << 16) | hsynce);
+                       evo_data(push, (vblanke << 16) | hblanke);
+                       evo_data(push, (vblanks << 16) | hblanks);
+                       evo_data(push, (vblan2e << 16) | vblan2s);
+                       evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+                       evo_data(push, 0x00000000);
+                       evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+                       evo_data(push, 0x00000311);
+                       evo_data(push, 0x00000100);
+               } else {
+                       evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+                       evo_data(push, 0x00000000);
+                       evo_data(push, (vactive << 16) | hactive);
+                       evo_data(push, ( vsynce << 16) | hsynce);
+                       evo_data(push, (vblanke << 16) | hblanke);
+                       evo_data(push, (vblanks << 16) | hblanks);
+                       evo_data(push, (vblan2e << 16) | vblan2s);
+                       evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+                       evo_data(push, 0x00000000); /* ??? */
+                       evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+                       evo_data(push, mode->clock * 1000);
+                       evo_data(push, 0x00200000); /* ??? */
+                       evo_data(push, mode->clock * 1000);
+                       evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, 0x00000311);
+                       evo_data(push, 0x00000100);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_connector = nouveau_crtc_connector_get(nv_crtc);
+       nv50_crtc_set_dither(nv_crtc, false);
+       nv50_crtc_set_scale(nv_crtc, false);
+       nv50_crtc_set_color_vibrance(nv_crtc, false);
+       nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+       return 0;
+}
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+                       struct drm_framebuffer *old_fb)
+{
+       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       int ret;
+
+       if (!crtc->fb) {
+               NV_DEBUG(drm, "No FB bound\n");
+               return 0;
+       }
+
+       ret = nv50_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
+
+       nv50_display_flip_stop(crtc);
+       nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+       nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+       return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb, int x, int y,
+                              enum mode_set_atomic state)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       nv50_display_flip_stop(crtc);
+       nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+       return 0;
+}
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+       int i;
+
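+       /* pre-NVD0 LUT entries are packed 8 bytes apart; NVD0+ uses a
+        * 32-byte stride and apparently wants each value biased by 0x6000.
+        */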
+       for (i = 0; i < 256; i++) {
+               u16 r = nv_crtc->lut.r[i] >> 2;
+               u16 g = nv_crtc->lut.g[i] >> 2;
+               u16 b = nv_crtc->lut.b[i] >> 2;
+
+               if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+                       writew(r + 0x0000, lut + (i * 0x08) + 0);
+                       writew(g + 0x0000, lut + (i * 0x08) + 2);
+                       writew(b + 0x0000, lut + (i * 0x08) + 4);
+               } else {
+                       writew(r + 0x6000, lut + (i * 0x20) + 0);
+                       writew(g + 0x6000, lut + (i * 0x20) + 2);
+                       writew(b + 0x6000, lut + (i * 0x20) + 4);
+               }
+       }
+}
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+                    uint32_t handle, uint32_t width, uint32_t height)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
+       struct drm_gem_object *gem;
+       struct nouveau_bo *nvbo;
+       bool visible = (handle != 0);
+       int i, ret = 0;
+
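+       /* the hardware cursor is a fixed 64x64 ARGB image; copy it word by
+        * word into the bo pinned for this head at creation time.
+        */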
+       if (visible) {
+               if (width != 64 || height != 64)
+                       return -EINVAL;
+
+               gem = drm_gem_object_lookup(dev, file_priv, handle);
+               if (unlikely(!gem))
+                       return -ENOENT;
+               nvbo = nouveau_gem_object(gem);
+
+               ret = nouveau_bo_map(nvbo);
+               if (ret == 0) {
+                       for (i = 0; i < 64 * 64; i++) {
+                               u32 v = nouveau_bo_rd32(nvbo, i);
+                               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+                       }
+                       nouveau_bo_unmap(nvbo);
+               }
+
+               drm_gem_object_unreference_unlocked(gem);
+       }
+
+       if (visible != nv_crtc->cursor.visible) {
+               nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+               nv_crtc->cursor.visible = visible;
+       }
+
+       return ret;
+}
+
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct nv50_curs *curs = nv50_curs(crtc);
+       struct nv50_chan *chan = nv50_chan(curs);
+       nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+       nv_wo32(chan->user, 0x0080, 0x00000000);
+       return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                   uint32_t start, uint32_t size)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       u32 end = min(start + size, (u32)256); /* lut arrays hold 256 entries */
+       u32 i;
+
+       for (i = start; i < end; i++) {
+               nv_crtc->lut.r[i] = r[i];
+               nv_crtc->lut.g[i] = g[i];
+               nv_crtc->lut.b[i] = b[i];
+       }
+
+       nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       struct nv50_disp *disp = nv50_disp(crtc->dev);
+       struct nv50_head *head = nv50_head(crtc);
+       nv50_dmac_destroy(disp->core, &head->ovly.base);
+       nv50_pioc_destroy(disp->core, &head->oimm.base);
+       nv50_dmac_destroy(disp->core, &head->sync.base);
+       nv50_pioc_destroy(disp->core, &head->curs.base);
+       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+       if (nv_crtc->cursor.nvbo)
+               nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+       nouveau_bo_unmap(nv_crtc->lut.nvbo);
+       if (nv_crtc->lut.nvbo)
+               nouveau_bo_unpin(nv_crtc->lut.nvbo);
+       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+       drm_crtc_cleanup(crtc);
+       kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+       .dpms = nv50_crtc_dpms,
+       .prepare = nv50_crtc_prepare,
+       .commit = nv50_crtc_commit,
+       .mode_fixup = nv50_crtc_mode_fixup,
+       .mode_set = nv50_crtc_mode_set,
+       .mode_set_base = nv50_crtc_mode_set_base,
+       .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+       .load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+       .cursor_set = nv50_crtc_cursor_set,
+       .cursor_move = nv50_crtc_cursor_move,
+       .gamma_set = nv50_crtc_gamma_set,
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = nv50_crtc_destroy,
+       .page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct nv50_head *head;
+       struct drm_crtc *crtc;
+       int ret, i;
+
+       head = kzalloc(sizeof(*head), GFP_KERNEL);
+       if (!head)
+               return -ENOMEM;
+
+       head->base.index = index;
+       head->base.set_dither = nv50_crtc_set_dither;
+       head->base.set_scale = nv50_crtc_set_scale;
+       head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+       head->base.color_vibrance = 50;
+       head->base.vibrant_hue = 0;
+       head->base.cursor.set_offset = nv50_cursor_set_offset;
+       head->base.cursor.set_pos = nv50_cursor_set_pos;
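+       /* start out with a linear (identity) gamma ramp */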
+       for (i = 0; i < 256; i++) {
+               head->base.lut.r[i] = i << 8;
+               head->base.lut.g[i] = i << 8;
+               head->base.lut.b[i] = i << 8;
+       }
+
+       crtc = &head->base.base;
+       drm_crtc_init(dev, crtc, &nv50_crtc_func);
+       drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+       drm_mode_crtc_set_gamma_size(crtc, 256);
+
+       ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &head->base.lut.nvbo);
+       if (!ret) {
+               ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(head->base.lut.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(head->base.lut.nvbo);
+               }
+               if (ret)
+                       nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+       }
+
+       if (ret)
+               goto out;
+
+       nv50_crtc_lut_load(crtc);
+
+       /* allocate cursor resources */
+       ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+                             &(struct nv50_display_curs_class) {
+                                       .head = index,
+                             }, sizeof(struct nv50_display_curs_class),
+                             &head->curs.base);
+       if (ret)
+               goto out;
+
+       ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &head->base.cursor.nvbo);
+       if (!ret) {
+               ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(head->base.cursor.nvbo);
+                       if (ret)
+                               nouveau_bo_unpin(head->base.cursor.nvbo);
+               }
+               if (ret)
+                       nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
+       }
+
+       if (ret)
+               goto out;
+
+       /* allocate page flip / sync resources */
+       ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+                             &(struct nv50_display_sync_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+                                       .head = index,
+                             }, sizeof(struct nv50_display_sync_class),
+                             disp->sync->bo.offset, &head->sync.base);
+       if (ret)
+               goto out;
+
+       head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+       /* allocate overlay resources */
+       ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+                             &(struct nv50_display_oimm_class) {
+                                       .head = index,
+                             }, sizeof(struct nv50_display_oimm_class),
+                             &head->oimm.base);
+       if (ret)
+               goto out;
+
+       ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+                             &(struct nv50_display_ovly_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+                                       .head = index,
+                             }, sizeof(struct nv50_display_ovly_class),
+                             disp->sync->bo.offset, &head->ovly.base);
+       if (ret)
+               goto out;
+
+out:
+       if (ret)
+               nv50_crtc_destroy(crtc);
+       return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       int or = nv_encoder->or;
+       u32 dpms_ctrl;
+
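+       /* presumably bit 0 drops hsync and bit 2 drops vsync, giving the
+        * usual VESA DPMS states.
+        */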
+       dpms_ctrl = 0x00000000;
+       if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+               dpms_ctrl |= 0x00000001;
+       if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+               dpms_ctrl |= 0x00000004;
+
+       nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
+
+static bool
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+                   const struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (nv_connector && nv_connector->native_mode) {
+               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+                       int id = adjusted_mode->base.id;
+                       *adjusted_mode = *nv_connector->native_mode;
+                       adjusted_mode->base.id = id;
+               }
+       }
+
+       return true;
+}
+
+static void
+nv50_dac_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+                 struct drm_display_mode *adjusted_mode)
+{
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       u32 *push;
+
+       nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       push = evo_wait(mast, 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       u32 syncs = 0x00000000;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000001;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000002;
+
+                       evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
+                       evo_data(push, 1 << nv_crtc->index);
+                       evo_data(push, syncs);
+               } else {
+                       u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+                       u32 syncs = 0x00000001;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000008;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000010;
+
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               magic |= 0x00000001;
+
+                       evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, syncs);
+                       evo_data(push, magic);
+                       evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
+                       evo_data(push, 1 << nv_crtc->index);
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_encoder->crtc = encoder->crtc;
+}
+
+static void
+nv50_dac_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       const int or = nv_encoder->or;
+       u32 *push;
+
+       if (nv_encoder->crtc) {
+               nv50_crtc_prepare(nv_encoder->crtc);
+
+               push = evo_wait(mast, 4);
+               if (push) {
+                       if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0400 + (or * 0x080), 1);
+                               evo_data(push, 0x00000000);
+                       } else {
+                               evo_mthd(push, 0x0180 + (or * 0x020), 1);
+                               evo_data(push, 0x00000000);
+                       }
+
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
+       }
+
+       nv_encoder->crtc = NULL;
+}
+
+static enum drm_connector_status
+nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       int ret, or = nouveau_encoder(encoder)->or;
+       u32 load = 0;
+
+       ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
+       if (ret || load != 7)
+               return connector_status_disconnected;
+
+       return connector_status_connected;
+}
+
+static void
+nv50_dac_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+       kfree(encoder);
+}
+
+static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
+       .dpms = nv50_dac_dpms,
+       .mode_fixup = nv50_dac_mode_fixup,
+       .prepare = nv50_dac_disconnect,
+       .commit = nv50_dac_commit,
+       .mode_set = nv50_dac_mode_set,
+       .disable = nv50_dac_disconnect,
+       .get_crtc = nv50_display_crtc_get,
+       .detect = nv50_dac_detect
+};
+
+static const struct drm_encoder_funcs nv50_dac_func = {
+       .destroy = nv50_dac_destroy,
+};
+
+static int
+nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+       struct drm_device *dev = connector->dev;
+       struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;
-       u32 tmp;
 
-       if (dcb->type != DCB_OUTPUT_TMDS)
+       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+       if (!nv_encoder)
+               return -ENOMEM;
+       nv_encoder->dcb = dcbe;
+       nv_encoder->or = ffs(dcbe->or) - 1;
+
+       encoder = to_drm_encoder(nv_encoder);
+       encoder->possible_crtcs = dcbe->heads;
+       encoder->possible_clones = 0;
+       drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
+       drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+       return 0;
+}
+
+/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_monitor_audio(nv_connector->edid))
                return;
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+
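+       /* ELD byte 2 holds the baseline block length in dwords, hence * 4 */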
+       nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
+                           nv_connector->base.eld,
+                           nv_connector->base.eld[2] * 4);
+}
+
+static void
+nv50_audio_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+
+       nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+       u32 rekey = 56; /* value used by both the binary driver and tegra */
+       u32 max_ac_packet;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (!drm_detect_hdmi_monitor(nv_connector->edid))
+               return;
+
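+       /* data island packets have to fit into the hblank region left after
+        * rekeying, apparently measured in 32-pixel units.
+        */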
+       max_ac_packet  = mode->htotal - mode->hdisplay;
+       max_ac_packet -= rekey;
+       max_ac_packet -= 18; /* constant from tegra */
+       max_ac_packet /= 32;
+
+       nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
+                           NV84_DISP_SOR_HDMI_PWR_STATE_ON |
+                           (max_ac_packet << 16) | rekey);
+
+       nv50_audio_mode_set(encoder, mode);
+}
+
+static void
+nv50_hdmi_disconnect(struct drm_encoder *encoder)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+
+       nv50_audio_disconnect(encoder);
+
+       nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+}
+
+/******************************************************************************
+ * SOR
+ *****************************************************************************/
+static void
+nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct drm_device *dev = encoder->dev;
+       struct nv50_disp *disp = nv50_disp(dev);
+       struct drm_encoder *partner;
+       int or = nv_encoder->or;
+
+       nv_encoder->last_dpms = mode;
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
-                   nv_encoder->dcb->or & (1 << or)) {
-                       tmp  = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
-                       tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
-                       nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
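+       /* never power the OR down while a partner TMDS encoder sharing it
+        * is still lit.
+        */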
+       list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
+               struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
+
+               if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
+                       continue;
+
+               if (nv_partner != nv_encoder &&
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
+                       if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
+                               return;
                        break;
                }
        }
+
+       nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
+
+       if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
+}
+
+static bool
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+                   const struct drm_display_mode *mode,
+                   struct drm_display_mode *adjusted_mode)
+{
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_connector *nv_connector;
+
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       if (nv_connector && nv_connector->native_mode) {
+               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
+                       int id = adjusted_mode->base.id;
+                       *adjusted_mode = *nv_connector->native_mode;
+                       adjusted_mode->base.id = id;
+               }
+       }
+
+       return true;
 }
 
 static void
-nv50_display_unk40_handler(struct drm_device *dev)
+nv50_sor_disconnect(struct drm_encoder *encoder)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct dcb_output *dcb = disp->irq.dcb;
-       u16 script = disp->irq.script;
-       u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       const int or = nv_encoder->or;
+       u32 *push;
+
+       if (nv_encoder->crtc) {
+               nv50_crtc_prepare(nv_encoder->crtc);
+
+               push = evo_wait(mast, 4);
+               if (push) {
+                       if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                               evo_mthd(push, 0x0600 + (or * 0x40), 1);
+                               evo_data(push, 0x00000000);
+                       } else {
+                               evo_mthd(push, 0x0200 + (or * 0x20), 1);
+                               evo_data(push, 0x00000000);
+                       }
 
-       NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-       disp->irq.dcb = NULL;
-       if (!dcb)
-               goto ack;
+                       evo_mthd(push, 0x0080, 1);
+                       evo_data(push, 0x00000000);
+                       evo_kick(push, mast);
+               }
 
-       nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
-       nv50_display_unk40_dp_set_tmds(dev, dcb);
+               nv50_hdmi_disconnect(encoder);
+       }
 
-ack:
-       nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
-       nv_wr32(device, 0x610030, 0x80000000);
-       nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
+       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+       nv_encoder->crtc = NULL;
 }
 
 static void
-nv50_display_bh(unsigned long data)
+nv50_sor_prepare(struct drm_encoder *encoder)
 {
-       struct drm_device *dev = (struct drm_device *)data;
-       struct nouveau_device *device = nouveau_dev(dev);
+       nv50_sor_disconnect(encoder);
+       if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
+               evo_sync(encoder->dev);
+}
+
+static void
+nv50_sor_commit(struct drm_encoder *encoder)
+{
+}
+
+static void
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+                 struct drm_display_mode *mode)
+{
+       struct nv50_disp *disp = nv50_disp(encoder->dev);
+       struct nv50_mast *mast = nv50_mast(encoder->dev);
+       struct drm_device *dev = encoder->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+       struct nouveau_connector *nv_connector;
+       struct nvbios *bios = &drm->vbios;
+       u32 *push, lvds = 0;
+       u8 owner = 1 << nv_crtc->index;
+       u8 proto = 0xf;
+       u8 depth = 0x0;
 
-       for (;;) {
-               uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-               uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
+       nv_connector = nouveau_encoder_connector_get(nv_encoder);
+       switch (nv_encoder->dcb->type) {
+       case DCB_OUTPUT_TMDS:
+               if (nv_encoder->dcb->sorconf.link & 1) {
+                       if (mode->clock < 165000)
+                               proto = 0x1;
+                       else
+                               proto = 0x5;
+               } else {
+                       proto = 0x2;
+               }
+
+               nv50_hdmi_mode_set(encoder, mode);
+               break;
+       case DCB_OUTPUT_LVDS:
+               proto = 0x0;
 
-               NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
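+               /* lvds script argument: 0x0100 selects dual-link, 0x0200 a
+                * 24-bit panel; byte 121 of an SPWG EDID encodes the link
+                * count.
+                */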
+               if (bios->fp_no_ddc) {
+                       if (bios->fp.dual_link)
+                               lvds |= 0x0100;
+                       if (bios->fp.if_is_24bit)
+                               lvds |= 0x0200;
+               } else {
+                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+                               if (((u8 *)nv_connector->edid)[121] == 2)
+                                       lvds |= 0x0100;
+                       } else
+                       if (mode->clock >= bios->fp.duallink_transition_clk) {
+                               lvds |= 0x0100;
+                       }
 
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
-                       nv50_display_unk10_handler(dev);
-               else
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
-                       nv50_display_unk20_handler(dev);
-               else
-               if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
-                       nv50_display_unk40_handler(dev);
+                       if (lvds & 0x0100) {
+                               if (bios->fp.strapless_is_24bit & 2)
+                                       lvds |= 0x0200;
+                       } else {
+                               if (bios->fp.strapless_is_24bit & 1)
+                                       lvds |= 0x0200;
+                       }
+
+                       if (nv_connector->base.display_info.bpc == 8)
+                               lvds |= 0x0200;
+               }
+
+               nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+               break;
+       case DCB_OUTPUT_DP:
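+               /* datarate = pixel clock (kHz) * bits per pixel / 8, later
+                * handed to nouveau_dp_dpms() for link configuration.
+                */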
+               if (nv_connector->base.display_info.bpc == 6) {
+                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
+                       depth = 0x2;
+               } else
+               if (nv_connector->base.display_info.bpc == 8) {
+                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
+                       depth = 0x5;
+               } else {
+                       nv_encoder->dp.datarate = mode->clock * 30 / 8;
+                       depth = 0x6;
+               }
+
+               if (nv_encoder->dcb->sorconf.link & 1)
+                       proto = 0x8;
                else
-                       break;
+                       proto = 0x9;
+               break;
+       default:
+               BUG();
+               break;
        }
 
-       nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
+       nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+
+       push = evo_wait(mast, 8);
+       if (push) {
+               if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+                       evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
+                       evo_data(push, (depth << 16) | (proto << 8) | owner);
+               } else {
+                       u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
+                       u32 syncs = 0x00000001;
+
+                       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+                               syncs |= 0x00000008;
+                       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+                               syncs |= 0x00000010;
+
+                       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                               magic |= 0x00000001;
+
+                       evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
+                       evo_data(push, syncs | (depth << 6));
+                       evo_data(push, magic);
+                       evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
+                       evo_data(push, owner | (proto << 8));
+               }
+
+               evo_kick(push, mast);
+       }
+
+       nv_encoder->crtc = encoder->crtc;
 }
 
 static void
-nv50_display_error_handler(struct drm_device *dev)
+nv50_sor_destroy(struct drm_encoder *encoder)
 {
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
-       u32 addr, data;
-       int chid;
+       drm_encoder_cleanup(encoder);
+       kfree(encoder);
+}
 
-       for (chid = 0; chid < 5; chid++) {
-               if (!(channels & (1 << chid)))
-                       continue;
+static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
+       .dpms = nv50_sor_dpms,
+       .mode_fixup = nv50_sor_mode_fixup,
+       .prepare = nv50_sor_prepare,
+       .commit = nv50_sor_commit,
+       .mode_set = nv50_sor_mode_set,
+       .disable = nv50_sor_disconnect,
+       .get_crtc = nv50_display_crtc_get,
+};
+
+static const struct drm_encoder_funcs nv50_sor_func = {
+       .destroy = nv50_sor_destroy,
+};
+
+static int
+nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+{
+       struct drm_device *dev = connector->dev;
+       struct nouveau_encoder *nv_encoder;
+       struct drm_encoder *encoder;
+
+       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
+       if (!nv_encoder)
+               return -ENOMEM;
+       nv_encoder->dcb = dcbe;
+       nv_encoder->or = ffs(dcbe->or) - 1;
+       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
 
-               nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
-               addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
-               data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
-               NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
-                             "(0x%04x 0x%02x)\n", chid,
-                        addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
+       encoder = to_drm_encoder(nv_encoder);
+       encoder->possible_crtcs = dcbe->heads;
+       encoder->possible_clones = 0;
+       drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
+       drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+       return 0;
+}
+
+/******************************************************************************
+ * Init
+ *****************************************************************************/
+void
+nv50_display_fini(struct drm_device *dev)
+{
+}
 
-               nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
+int
+nv50_display_init(struct drm_device *dev)
+{
+       u32 *push = evo_wait(nv50_mast(dev), 32);
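+       /* method 0x0088 looks to be the core channel's notify ctxdma; point
+        * it at the shared sync bo and wait for the channel to go idle.
+        */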
+       if (push) {
+               evo_mthd(push, 0x0088, 1);
+               evo_data(push, NvEvoSync);
+               evo_kick(push, nv50_mast(dev));
+               return evo_sync(dev);
        }
+
+       return -EBUSY;
 }
 
 void
-nv50_display_intr(struct drm_device *dev)
+nv50_display_destroy(struct drm_device *dev)
 {
+       struct nv50_disp *disp = nv50_disp(dev);
+
+       nv50_dmac_destroy(disp->core, &disp->mast.base);
+
+       nouveau_bo_unmap(disp->sync);
+       if (disp->sync)
+               nouveau_bo_unpin(disp->sync);
+       nouveau_bo_ref(NULL, &disp->sync);
+
+       nouveau_display(dev)->priv = NULL;
+       kfree(disp);
+}
+
+int
+nv50_display_create(struct drm_device *dev)
+{
+       static const u16 oclass[] = {
+               NVE0_DISP_CLASS,
+               NVD0_DISP_CLASS,
+               NVA3_DISP_CLASS,
+               NV94_DISP_CLASS,
+               NVA0_DISP_CLASS,
+               NV84_DISP_CLASS,
+               NV50_DISP_CLASS,
+       };
        struct nouveau_device *device = nouveau_dev(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       uint32_t delayed = 0;
-
-       while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
-               uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
-               uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
-               uint32_t clock;
+       struct dcb_table *dcb = &drm->vbios.dcb;
+       struct drm_connector *connector, *tmp;
+       struct nv50_disp *disp;
+       struct dcb_output *dcbe;
+       int crtcs, ret, i;
 
-               NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
+       disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+       if (!disp)
+               return -ENOMEM;
 
-               if (!intr0 && !(intr1 & ~delayed))
-                       break;
+       nouveau_display(dev)->priv = disp;
+       nouveau_display(dev)->dtor = nv50_display_destroy;
+       nouveau_display(dev)->init = nv50_display_init;
+       nouveau_display(dev)->fini = nv50_display_fini;
 
-               if (intr0 & 0x001f0000) {
-                       nv50_display_error_handler(dev);
-                       intr0 &= ~0x001f0000;
+       /* small shared memory area we use for notifiers and semaphores */
+       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+                            0, 0x0000, NULL, &disp->sync);
+       if (!ret) {
+               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+               if (!ret) {
+                       ret = nouveau_bo_map(disp->sync);
+                       if (ret)
+                               nouveau_bo_unpin(disp->sync);
                }
+               if (ret)
+                       nouveau_bo_ref(NULL, &disp->sync);
+       }
 
-               if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
-                       intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-                       delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-               }
+       if (ret)
+               goto out;
+
+       /* attempt to allocate a supported evo display class */
+       ret = -ENODEV;
+       for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
+               ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+                                        0xd1500000, oclass[i], NULL, 0,
+                                        &disp->core);
+       }
 
-               clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
-                                 NV50_PDISPLAY_INTR_1_CLK_UNK20 |
-                                 NV50_PDISPLAY_INTR_1_CLK_UNK40));
-               if (clock) {
-                       nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
-                       tasklet_schedule(&disp->tasklet);
-                       delayed |= clock;
-                       intr1 &= ~clock;
-               }
+       if (ret)
+               goto out;
+
+       /* allocate master evo channel */
+       ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
+                             &(struct nv50_display_mast_class) {
+                                       .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
+                             }, sizeof(struct nv50_display_mast_class),
+                             disp->sync->bo.offset, &disp->mast.base);
+       if (ret)
+               goto out;
+
+       /* create crtc objects to represent the hw heads */
+       if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
+               crtcs = nv_rd32(device, 0x022448);
+       else
+               crtcs = 2;
 
-               if (intr0) {
-                       NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
-                       nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
+       for (i = 0; i < crtcs; i++) {
+               ret = nv50_crtc_create(dev, disp->core, i);
+               if (ret)
+                       goto out;
+       }
+
+       /* create encoder/connector objects based on VBIOS DCB table */
+       for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
+               connector = nouveau_connector_create(dev, dcbe->connector);
+               if (IS_ERR(connector))
+                       continue;
+
+               if (dcbe->location != DCB_LOC_ON_CHIP) {
+                       NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
+                               dcbe->type, ffs(dcbe->or) - 1);
+                       continue;
                }
 
-               if (intr1) {
-                       NV_ERROR(drm,
-                                "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
-                       nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
+               switch (dcbe->type) {
+               case DCB_OUTPUT_TMDS:
+               case DCB_OUTPUT_LVDS:
+               case DCB_OUTPUT_DP:
+                       nv50_sor_create(connector, dcbe);
+                       break;
+               case DCB_OUTPUT_ANALOG:
+                       nv50_dac_create(connector, dcbe);
+                       break;
+               default:
+                       NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
+                               dcbe->type, ffs(dcbe->or) - 1);
+                       continue;
                }
        }
+
+       /* cull any connectors we created that don't have an encoder */
+       list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+               if (connector->encoder_ids[0])
+                       continue;
+
+               NV_WARN(drm, "%s has no encoders, removing\n",
+                       drm_get_connector_name(connector));
+               connector->funcs->destroy(connector);
+       }
+
+out:
+       if (ret)
+               nv50_display_destroy(dev);
+       return ret;
 }
index 973554d..70da347 100644 (file)
 #include "nouveau_display.h"
 #include "nouveau_crtc.h"
 #include "nouveau_reg.h"
-#include "nv50_evo.h"
 
-struct nv50_display_crtc {
-       struct nouveau_channel *sync;
-       struct {
-               struct nouveau_bo *bo;
-               u32 offset;
-               u16 value;
-       } sem;
-};
+int  nv50_display_create(struct drm_device *);
+void nv50_display_destroy(struct drm_device *);
+int  nv50_display_init(struct drm_device *);
+void nv50_display_fini(struct drm_device *);
 
-struct nv50_display {
-       struct nouveau_channel *master;
-
-       struct nouveau_gpuobj *ramin;
-       u32 dmao;
-       u32 hash;
-
-       struct nv50_display_crtc crtc[2];
-
-       struct tasklet_struct tasklet;
-       struct {
-               struct dcb_output *dcb;
-               u16 script;
-               u32 pclk;
-       } irq;
-};
-
-static inline struct nv50_display *
-nv50_display(struct drm_device *dev)
-{
-       return nouveau_display(dev)->priv;
-}
-
-int nv50_display_early_init(struct drm_device *dev);
-void nv50_display_late_takedown(struct drm_device *dev);
-int nv50_display_create(struct drm_device *dev);
-int nv50_display_init(struct drm_device *dev);
-void nv50_display_fini(struct drm_device *dev);
-void nv50_display_destroy(struct drm_device *dev);
-void nv50_display_intr(struct drm_device *);
-int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
-int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
-
-u32  nv50_display_active_crtcs(struct drm_device *);
-
-int  nv50_display_sync(struct drm_device *);
-int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
-                           struct nouveau_channel *chan);
 void nv50_display_flip_stop(struct drm_crtc *);
-
-int  nv50_evo_create(struct drm_device *dev);
-void nv50_evo_destroy(struct drm_device *dev);
-int  nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
-                         u64 size);
-int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
-                        u64 base, u64 size, struct nouveau_gpuobj **);
-
-int  nvd0_display_create(struct drm_device *);
-void nvd0_display_destroy(struct drm_device *);
-int  nvd0_display_init(struct drm_device *);
-void nvd0_display_fini(struct drm_device *);
-void nvd0_display_intr(struct drm_device *);
-
-void nvd0_display_flip_stop(struct drm_crtc *);
-int  nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
                            struct nouveau_channel *, u32 swap_interval);
 
 struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
-struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
 
 #endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644 (file)
index 9f6f55c..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/fb.h>
-
-static u32
-nv50_evo_rd32(struct nouveau_object *object, u32 addr)
-{
-       void __iomem *iomem = object->oclass->ofuncs->rd08;
-       return ioread32_native(iomem + addr);
-}
-
-static void
-nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
-{
-       void __iomem *iomem = object->oclass->ofuncs->rd08;
-       iowrite32_native(data, iomem + addr);
-}
-
-static void
-nv50_evo_channel_del(struct nouveau_channel **pevo)
-{
-       struct nouveau_channel *evo = *pevo;
-
-       if (!evo)
-               return;
-       *pevo = NULL;
-
-       nouveau_bo_unmap(evo->push.buffer);
-       nouveau_bo_ref(NULL, &evo->push.buffer);
-
-       if (evo->object)
-               iounmap(evo->object->oclass->ofuncs);
-
-       kfree(evo);
-}
-
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
-                   u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
-       struct drm_device *dev = evo->fence;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       u32 dmao = disp->dmao;
-       u32 hash = disp->hash;
-       u32 flags5;
-
-       if (nv_device(drm->device)->chipset < 0xc0) {
-               /* not supported on 0x50, specified in format mthd */
-               if (nv_device(drm->device)->chipset == 0x50)
-                       memtype = 0;
-               flags5 = 0x00010000;
-       } else {
-               if (memtype & 0x80000000)
-                       flags5 = 0x00000000; /* large pages */
-               else
-                       flags5 = 0x00020000;
-       }
-
-       nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
-       nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
-       nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
-       nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
-                                         upper_32_bits(base));
-       nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
-       nv_wo32(disp->ramin, dmao + 0x14, flags5);
-
-       nv_wo32(disp->ramin, hash + 0x00, handle);
-       nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
-                                          evo->handle);
-
-       disp->dmao += 0x20;
-       disp->hash += 0x08;
-       return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, int chid,
-                    struct nouveau_channel **pevo)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo;
-       int ret;
-
-       evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-       if (!evo)
-               return -ENOMEM;
-       *pevo = evo;
-
-       evo->drm = drm;
-       evo->handle = chid;
-       evo->fence = dev;
-       evo->user_get = 4;
-       evo->user_put = 0;
-
-       ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
-                            &evo->push.buffer);
-       if (ret == 0)
-               ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
-       if (ret) {
-               NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
-               nv50_evo_channel_del(pevo);
-               return ret;
-       }
-
-       ret = nouveau_bo_map(evo->push.buffer);
-       if (ret) {
-               NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
-               nv50_evo_channel_del(pevo);
-               return ret;
-       }
-
-       evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
-#ifdef NOUVEAU_OBJECT_MAGIC
-       evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
-#endif
-       evo->object->parent = nv_object(disp->ramin)->parent;
-       evo->object->engine = nv_object(disp->ramin)->engine;
-       evo->object->oclass =
-               kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
-       evo->object->oclass->ofuncs =
-               kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
-       evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
-       evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
-       evo->object->oclass->ofuncs->rd08 =
-               ioremap(pci_resource_start(dev->pdev, 0) +
-                       NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
-       return 0;
-}
-
-static int
-nv50_evo_channel_init(struct nouveau_channel *evo)
-{
-       struct nouveau_drm *drm = evo->drm;
-       struct nouveau_device *device = nv_device(drm->device);
-       int id = evo->handle, ret, i;
-       u64 pushbuf = evo->push.buffer->bo.offset;
-       u32 tmp;
-
-       tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-       if ((tmp & 0x009f0000) == 0x00020000)
-               nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
-
-       tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
-       if ((tmp & 0x003f0000) == 0x00030000)
-               nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
-
-       /* initialise fifo */
-       nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
-                    NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
-                    NV50_PDISPLAY_EVO_DMA_CB_VALID);
-       nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
-       nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
-                    NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-
-       nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
-       nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
-                    NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
-       if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
-               NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
-                        nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-               return -EBUSY;
-       }
-
-       /* enable error reporting on the channel */
-       nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
-
-       evo->dma.max = (4096/4) - 2;
-       evo->dma.max &= ~7;
-       evo->dma.put = 0;
-       evo->dma.cur = evo->dma.put;
-       evo->dma.free = evo->dma.max - evo->dma.cur;
-
-       ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-               OUT_RING(evo, 0);
-
-       return 0;
-}
-
-static void
-nv50_evo_channel_fini(struct nouveau_channel *evo)
-{
-       struct nouveau_drm *drm = evo->drm;
-       struct nouveau_device *device = nv_device(drm->device);
-       int id = evo->handle;
-
-       nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
-       nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
-       nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
-       if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
-               NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
-                        nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
-       }
-}
-
-void
-nv50_evo_destroy(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int i;
-
-       for (i = 0; i < 2; i++) {
-               if (disp->crtc[i].sem.bo) {
-                       nouveau_bo_unmap(disp->crtc[i].sem.bo);
-                       nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
-               }
-               nv50_evo_channel_del(&disp->crtc[i].sync);
-       }
-       nv50_evo_channel_del(&disp->master);
-       nouveau_gpuobj_ref(NULL, &disp->ramin);
-}
-
-int
-nv50_evo_create(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fb *pfb = nouveau_fb(drm->device);
-       struct nv50_display *disp = nv50_display(dev);
-       struct nouveau_channel *evo;
-       int ret, i, j;
-
-       /* setup object management on it, any other evo channel will
-        * use this also as there's no per-channel support on the
-        * hardware
-        */
-       ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
-                                NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
-       if (ret) {
-               NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
-               goto err;
-       }
-
-       disp->hash = 0x0000;
-       disp->dmao = 0x1000;
-
-       /* create primary evo channel, the one we use for modesetting
-        * purposes
-        */
-       ret = nv50_evo_channel_new(dev, 0, &disp->master);
-       if (ret)
-               return ret;
-       evo = disp->master;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
-                                 disp->ramin->addr + 0x2000, 0x1000, NULL);
-       if (ret)
-               goto err;
-
-       /* create some default objects for the scanout memtypes we support */
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
-                                 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
-                                 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
-                                 0, pfb->ram.size, NULL);
-       if (ret)
-               goto err;
-
-       /* create "display sync" channels and other structures we need
-        * to implement page flipping
-        */
-       for (i = 0; i < 2; i++) {
-               struct nv50_display_crtc *dispc = &disp->crtc[i];
-               u64 offset;
-
-               ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
-               if (ret)
-                       goto err;
-
-               ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-                                    0, 0x0000, NULL, &dispc->sem.bo);
-               if (!ret) {
-                       ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
-                       if (!ret)
-                               ret = nouveau_bo_map(dispc->sem.bo);
-                       if (ret)
-                               nouveau_bo_ref(NULL, &dispc->sem.bo);
-                       offset = dispc->sem.bo->bo.offset;
-               }
-
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
-                                         offset, 4096, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
-                                         (nv_device(drm->device)->chipset < 0xc0 ?
-                                         0x7a : 0xfe),
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
-                                         (nv_device(drm->device)->chipset < 0xc0 ?
-                                         0x70 : 0xfe),
-                                         0, pfb->ram.size, NULL);
-               if (ret)
-                       goto err;
-
-               for (j = 0; j < 4096; j += 4)
-                       nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
-               dispc->sem.offset = 0;
-       }
-
-       return 0;
-
-err:
-       nv50_evo_destroy(dev);
-       return ret;
-}
-
-int
-nv50_evo_init(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int ret, i;
-
-       ret = nv50_evo_channel_init(disp->master);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < 2; i++) {
-               ret = nv50_evo_channel_init(disp->crtc[i].sync);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-void
-nv50_evo_fini(struct drm_device *dev)
-{
-       struct nv50_display *disp = nv50_display(dev);
-       int i;
-
-       for (i = 0; i < 2; i++) {
-               if (disp->crtc[i].sync)
-                       nv50_evo_channel_fini(disp->crtc[i].sync);
-       }
-
-       if (disp->master)
-               nv50_evo_channel_fini(disp->master);
-}
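
A note on the deleted nv50_evo_create() above: apart from one early `return ret` after nv50_evo_channel_new() (which appears to skip the unwind and leak disp->ramin), every failure path jumps to a single err: label that calls nv50_evo_destroy(), and the destroy routine checks each member before releasing it, so it is safe to invoke on partially-constructed state. A minimal standalone sketch of that idiom, not nouveau code; all names are hypothetical and plain malloc/free stand in for the gpuobj/channel helpers:

#include <stdlib.h>

struct disp {
        void *ramin;
        void *master;
};

static void disp_destroy(struct disp *d)
{
        /* like nouveau_gpuobj_ref(NULL, ...): releasing NULL is a no-op */
        free(d->master);
        d->master = NULL;
        free(d->ramin);
        d->ramin = NULL;
}

static int disp_create(struct disp *d)
{
        d->ramin = malloc(32768);
        if (!d->ramin)
                goto err;

        d->master = malloc(4096);
        if (!d->master)
                goto err;

        return 0;
err:
        disp_destroy(d);        /* safe with whatever was allocated so far */
        return -1;
}

int main(void)
{
        struct disp d = { NULL, NULL };
        int ret = disp_create(&d);
        if (ret == 0)
                disp_destroy(&d);
        return ret ? 1 : 0;
}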
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NV50_EVO_H__
-#define __NV50_EVO_H__
-
-#define NV50_EVO_UPDATE                                              0x00000080
-#define NV50_EVO_UNK84                                               0x00000084
-#define NV50_EVO_UNK84_NOTIFY                                        0x40000000
-#define NV50_EVO_UNK84_NOTIFY_DISABLED                               0x00000000
-#define NV50_EVO_UNK84_NOTIFY_ENABLED                                0x40000000
-#define NV50_EVO_DMA_NOTIFY                                          0x00000088
-#define NV50_EVO_DMA_NOTIFY_HANDLE                                   0xffffffff
-#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE                              0x00000000
-#define NV50_EVO_UNK8C                                               0x0000008C
-
-#define NV50_EVO_DAC(n, r)                       ((n) * 0x80 + NV50_EVO_DAC_##r)
-#define NV50_EVO_DAC_MODE_CTRL                                       0x00000400
-#define NV50_EVO_DAC_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_DAC_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_DAC_MODE_CTRL2                                      0x00000404
-#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC                               0x00000001
-#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC                               0x00000002
-
-#define NV50_EVO_SOR(n, r)                       ((n) * 0x40 + NV50_EVO_SOR_##r)
-#define NV50_EVO_SOR_MODE_CTRL                                       0x00000600
-#define NV50_EVO_SOR_MODE_CTRL_CRTC0                                 0x00000001
-#define NV50_EVO_SOR_MODE_CTRL_CRTC1                                 0x00000002
-#define NV50_EVO_SOR_MODE_CTRL_TMDS                                  0x00000100
-#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK                        0x00000400
-#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                                0x00001000
-#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                                0x00002000
-
-#define NV50_EVO_CRTC(n, r)                    ((n) * 0x400 + NV50_EVO_CRTC_##r)
-#define NV84_EVO_CRTC(n, r)                    ((n) * 0x400 + NV84_EVO_CRTC_##r)
-#define NV50_EVO_CRTC_UNK0800                                        0x00000800
-#define NV50_EVO_CRTC_CLOCK                                          0x00000804
-#define NV50_EVO_CRTC_INTERLACE                                      0x00000808
-#define NV50_EVO_CRTC_DISPLAY_START                                  0x00000810
-#define NV50_EVO_CRTC_DISPLAY_TOTAL                                  0x00000814
-#define NV50_EVO_CRTC_SYNC_DURATION                                  0x00000818
-#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END                        0x0000081c
-#define NV50_EVO_CRTC_UNK0820                                        0x00000820
-#define NV50_EVO_CRTC_UNK0824                                        0x00000824
-#define NV50_EVO_CRTC_UNK082C                                        0x0000082c
-#define NV50_EVO_CRTC_CLUT_MODE                                      0x00000840
-/* You can't have a palette in 8 bit mode (=OFF) */
-#define NV50_EVO_CRTC_CLUT_MODE_BLANK                                0x00000000
-#define NV50_EVO_CRTC_CLUT_MODE_OFF                                  0x80000000
-#define NV50_EVO_CRTC_CLUT_MODE_ON                                   0xC0000000
-#define NV50_EVO_CRTC_CLUT_OFFSET                                    0x00000844
-#define NV84_EVO_CRTC_CLUT_DMA                                       0x0000085C
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                                0xffffffff
-#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE                           0x00000000
-#define NV50_EVO_CRTC_FB_OFFSET                                      0x00000860
-#define NV50_EVO_CRTC_FB_SIZE                                        0x00000868
-#define NV50_EVO_CRTC_FB_CONFIG                                      0x0000086c
-#define NV50_EVO_CRTC_FB_CONFIG_MODE                                 0x00100000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE                            0x00000000
-#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH                           0x00100000
-#define NV50_EVO_CRTC_FB_DEPTH                                       0x00000870
-#define NV50_EVO_CRTC_FB_DEPTH_8                                     0x00001e00
-#define NV50_EVO_CRTC_FB_DEPTH_15                                    0x0000e900
-#define NV50_EVO_CRTC_FB_DEPTH_16                                    0x0000e800
-#define NV50_EVO_CRTC_FB_DEPTH_24                                    0x0000cf00
-#define NV50_EVO_CRTC_FB_DEPTH_30                                    0x0000d100
-#define NV50_EVO_CRTC_FB_DMA                                         0x00000874
-#define NV50_EVO_CRTC_FB_DMA_HANDLE                                  0xffffffff
-#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE                             0x00000000
-#define NV50_EVO_CRTC_CURSOR_CTRL                                    0x00000880
-#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE                               0x05000000
-#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW                               0x85000000
-#define NV50_EVO_CRTC_CURSOR_OFFSET                                  0x00000884
-#define NV84_EVO_CRTC_CURSOR_DMA                                     0x0000089c
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE                              0xffffffff
-#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE                         0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL                                    0x000008a0
-#define NV50_EVO_CRTC_DITHER_CTRL_OFF                                0x00000000
-#define NV50_EVO_CRTC_DITHER_CTRL_ON                                 0x00000011
-#define NV50_EVO_CRTC_SCALE_CTRL                                     0x000008a4
-#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
-#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
-#define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
-#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE                            0x000fff00
-#define NV50_EVO_CRTC_COLOR_CTRL_HUE                                 0xfff00000
-#define NV50_EVO_CRTC_FB_POS                                         0x000008c0
-#define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
-#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
-       ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
-/* Both of these are needed, otherwise nothing happens. */
-#define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
-#define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
-#define NV50_EVO_CRTC_UNK900                                         0x00000900
-#define NV50_EVO_CRTC_UNK904                                         0x00000904
-
-#endif
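
The header above builds per-output register addresses from a per-index stride plus a method offset via token pasting, e.g. NV50_EVO_SOR(n, r) expands to (n) * 0x40 + NV50_EVO_SOR_##r. A toy sketch of the same idiom, with made-up offsets and a plain array standing in for the register BAR (none of this reflects real hardware layout):

#include <stdint.h>

#define DEMO_SOR_STRIDE         0x40
#define DEMO_SOR_MODE_CTRL      0x0600
#define DEMO_SOR(n, r)          ((n) * DEMO_SOR_STRIDE + DEMO_SOR_##r)

static uint32_t mmio[0x1000];   /* hypothetical stand-in for the mmio window */

/* read-modify-write helper in the style of nv_mask() */
static uint32_t demo_mask(uint32_t reg, uint32_t mask, uint32_t val)
{
        uint32_t tmp = mmio[reg / 4];

        mmio[reg / 4] = (tmp & ~mask) | val;
        return tmp;
}

int main(void)
{
        /* route SOR 1 to CRTC 0: touch only the crtc-select bits */
        demo_mask(DEMO_SOR(1, MODE_CTRL), 0x00000003, 0x00000001);
        return 0;
}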
index e0763ea..c20f272 100644
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
                             0, 0x0000, NULL, &priv->bo);
        if (!ret) {
                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-               if (!ret)
+               if (!ret) {
                        ret = nouveau_bo_map(priv->bo);
+                       if (ret)
+                               nouveau_bo_unpin(priv->bo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &priv->bo);
        }
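
The hunk above fixes a leak in nv50_fence_create(): a successful nouveau_bo_pin() was not undone when the subsequent nouveau_bo_map() failed, so the final nouveau_bo_ref(NULL, ...) dropped a still-pinned buffer. The general shape, sketched standalone with hypothetical stand-ins for the pin/map helpers:

#include <stdio.h>

struct obj { int pinned, mapped; };

static int obj_pin(struct obj *o)    { o->pinned = 1; return 0; }
static void obj_unpin(struct obj *o) { o->pinned = 0; }
static int obj_map(struct obj *o)    { return -1; /* simulate failure */ }

static int obj_setup(struct obj *o)
{
        int ret = obj_pin(o);
        if (ret == 0) {
                ret = obj_map(o);
                if (ret)
                        obj_unpin(o);   /* the fix: undo the pin on map failure */
        }
        return ret;
}

int main(void)
{
        struct obj o = { 0, 0 };

        obj_setup(&o);
        printf("pinned after failed setup: %d\n", o.pinned);    /* prints 0 */
        return 0;
}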
index c4a6503..8bd5d27 100644
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_device *device = nouveau_dev(dev);
-       u32 crtc_mask = nv50_display_active_crtcs(dev);
+       u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
        struct nouveau_mem_exec_func exec = {
                .dev = dev,
                .precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59..0000000
+++ /dev/null
@@ -1,530 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-#include "nouveau_reg.h"
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_encoder.h"
-#include "nouveau_connector.h"
-#include "nouveau_crtc.h"
-#include "nv50_display.h"
-
-#include <subdev/timer.h>
-
-static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
-       static const u8 nv50[] = { 16, 8, 0, 24 };
-       if (nv_device(drm->device)->chipset == 0xaf)
-               return nvaf[lane];
-       return nv50[lane];
-}
-
-static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
-}
-
-static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-                     u8 lane, u8 swing, u8 preem)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
-       u32 mask = 0x000000ff << shift;
-       u8 *table, *entry, *config;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       config = entry + table[4];
-       while (config[0] != swing || config[1] != preem) {
-               config += table[5];
-               if (config >= entry + table[4] + entry[4] * table[5])
-                       return;
-       }
-
-       nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
-       nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
-       nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
-}
-
-static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-                    int link_nr, u32 link_bw, bool enhframe)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
-       u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
-       u8 *table, *entry, mask;
-       int i;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       entry = ROMPTR(dev, entry[10]);
-       if (entry) {
-               while (link_bw < ROM16(entry[0]) * 10)
-                       entry += 4;
-
-               nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
-       }
-
-       dpctrl |= ((1 << link_nr) - 1) << 16;
-       if (enhframe)
-               dpctrl |= 0x00004000;
-
-       if (link_bw > 162000)
-               clksor |= 0x00040000;
-
-       nv_wr32(device, 0x614300 + (or * 0x800), clksor);
-       nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
-
-       mask = 0;
-       for (i = 0; i < link_nr; i++)
-               mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
-       nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
-}
-
-static void
-nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
-       u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
-       if (clksor & 0x000c0000)
-               *bw = 270000;
-       else
-               *bw = 162000;
-
-       if      (dpctrl > 0x00030000) *nr = 4;
-       else if (dpctrl > 0x00010000) *nr = 2;
-       else                          *nr = 1;
-}
-
-void
-nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       const u32 symbol = 100000;
-       int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
-       int TU, VTUi, VTUf, VTUa;
-       u64 link_data_rate, link_ratio, unk;
-       u32 best_diff = 64 * symbol;
-       u32 link_nr, link_bw, r;
-
-       /* calculate packed data rate for each lane */
-       nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
-       link_data_rate = (clk * bpp / 8) / link_nr;
-
-       /* calculate ratio of packed data rate to link symbol rate */
-       link_ratio = link_data_rate * symbol;
-       r = do_div(link_ratio, link_bw);
-
-       for (TU = 64; TU >= 32; TU--) {
-               /* calculate average number of valid symbols in each TU */
-               u32 tu_valid = link_ratio * TU;
-               u32 calc, diff;
-
-               /* find a hw representation for the fraction.. */
-               VTUi = tu_valid / symbol;
-               calc = VTUi * symbol;
-               diff = tu_valid - calc;
-               if (diff) {
-                       if (diff >= (symbol / 2)) {
-                               VTUf = symbol / (symbol - diff);
-                               if (symbol - (VTUf * diff))
-                                       VTUf++;
-
-                               if (VTUf <= 15) {
-                                       VTUa  = 1;
-                                       calc += symbol - (symbol / VTUf);
-                               } else {
-                                       VTUa  = 0;
-                                       VTUf  = 1;
-                                       calc += symbol;
-                               }
-                       } else {
-                               VTUa  = 0;
-                               VTUf  = min((int)(symbol / diff), 15);
-                               calc += symbol / VTUf;
-                       }
-
-                       diff = calc - tu_valid;
-               } else {
-                       /* no remainder, but the hw doesn't like the fractional
-                        * part to be zero.  decrement the integer part and
-                        * have the fraction add a whole symbol back
-                        */
-                       VTUa = 0;
-                       VTUf = 1;
-                       VTUi--;
-               }
-
-               if (diff < best_diff) {
-                       best_diff = diff;
-                       bestTU = TU;
-                       bestVTUa = VTUa;
-                       bestVTUf = VTUf;
-                       bestVTUi = VTUi;
-                       if (diff == 0)
-                               break;
-               }
-       }
-
-       if (!bestTU) {
-               NV_ERROR(drm, "DP: unable to find suitable config\n");
-               return;
-       }
-
-       /* XXX close to vbios numbers, but not right */
-       unk  = (symbol - link_ratio) * bestTU;
-       unk *= link_ratio;
-       r = do_div(unk, symbol);
-       r = do_div(unk, symbol);
-       unk += 6;
-
-       nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
-       nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
-                                                            bestVTUf << 16 |
-                                                            bestVTUi << 8 |
-                                                            unk);
-}
-static void
-nv50_sor_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_channel *evo = nv50_display(dev)->master;
-       int ret;
-
-       if (!nv_encoder->crtc)
-               return;
-       nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
-
-       NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
-
-       ret = RING_SPACE(evo, 4);
-       if (ret) {
-               NV_ERROR(drm, "no space while disconnecting SOR\n");
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING  (evo, 0);
-       BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-       OUT_RING  (evo, 0);
-
-       nouveau_hdmi_mode_set(encoder, NULL);
-
-       nv_encoder->crtc = NULL;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-}
-
-static void
-nv50_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_device *device = nouveau_dev(encoder->dev);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_encoder *enc;
-       uint32_t val;
-       int or = nv_encoder->or;
-
-       NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
-
-       nv_encoder->last_dpms = mode;
-       list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nvenc = nouveau_encoder(enc);
-
-               if (nvenc == nv_encoder ||
-                   (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
-                    nvenc->dcb->type != DCB_OUTPUT_LVDS &&
-                    nvenc->dcb->type != DCB_OUTPUT_DP) ||
-                   nvenc->dcb->or != nv_encoder->dcb->or)
-                       continue;
-
-               if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
-                       return;
-       }
-
-       /* wait for it to be done */
-       if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
-                    NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
-               NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
-               NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
-       }
-
-       val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
-
-       if (mode == DRM_MODE_DPMS_ON)
-               val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-       else
-               val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
-
-       nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
-               NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-       if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
-                    NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-               NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
-               NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
-                        nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
-       }
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               struct dp_train_func func = {
-                       .link_set = nv50_sor_dp_link_set,
-                       .train_set = nv50_sor_dp_train_set,
-                       .train_adj = nv50_sor_dp_train_adj
-               };
-
-               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-       }
-}
-
-static void
-nv50_sor_save(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static void
-nv50_sor_restore(struct drm_encoder *encoder)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       NV_ERROR(drm, "!!\n");
-}
-
-static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *connector;
-
-       NV_DEBUG(drm, "or %d\n", nv_encoder->or);
-
-       connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!connector) {
-               NV_ERROR(drm, "Encoder has no connector\n");
-               return false;
-       }
-
-       if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
-            connector->native_mode)
-               drm_mode_copy(adjusted_mode, connector->native_mode);
-
-       return true;
-}
-
-static void
-nv50_sor_prepare(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       nv50_sor_disconnect(encoder);
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               /* avoid race between link training and supervisor intr */
-               nv50_display_sync(encoder->dev);
-       }
-}
-
-static void
-nv50_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-                 struct drm_display_mode *mode)
-{
-       struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-       struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       uint32_t mode_ctl = 0;
-       int ret;
-
-       NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
-                    nv_encoder->or, nv_encoder->dcb->type, crtc->index);
-       nv_encoder->crtc = encoder->crtc;
-
-       switch (nv_encoder->dcb->type) {
-       case DCB_OUTPUT_TMDS:
-               if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (mode->clock < 165000)
-                               mode_ctl = 0x0100;
-                       else
-                               mode_ctl = 0x0500;
-               } else
-                       mode_ctl = 0x0200;
-
-               nouveau_hdmi_mode_set(encoder, mode);
-               break;
-       case DCB_OUTPUT_DP:
-               nv_connector = nouveau_encoder_connector_get(nv_encoder);
-               if (nv_connector && nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
-                       mode_ctl |= 0x00020000;
-               } else {
-                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
-                       mode_ctl |= 0x00050000;
-               }
-
-               if (nv_encoder->dcb->sorconf.link & 1)
-                       mode_ctl |= 0x00000800;
-               else
-                       mode_ctl |= 0x00000900;
-               break;
-       default:
-               break;
-       }
-
-       if (crtc->index == 1)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
-       else
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
-
-       nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       ret = RING_SPACE(evo, 2);
-       if (ret) {
-               NV_ERROR(drm, "no space while connecting SOR\n");
-               nv_encoder->crtc = NULL;
-               return;
-       }
-       BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
-       OUT_RING(evo, mode_ctl);
-}
-
-static struct drm_crtc *
-nv50_sor_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
-       .dpms = nv50_sor_dpms,
-       .save = nv50_sor_save,
-       .restore = nv50_sor_restore,
-       .mode_fixup = nv50_sor_mode_fixup,
-       .prepare = nv50_sor_prepare,
-       .commit = nv50_sor_commit,
-       .mode_set = nv50_sor_mode_set,
-       .get_crtc = nv50_sor_crtc_get,
-       .detect = NULL,
-       .disable = nv50_sor_disconnect
-};
-
-static void
-nv50_sor_destroy(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_drm *drm = nouveau_drm(encoder->dev);
-
-       NV_DEBUG(drm, "\n");
-
-       drm_encoder_cleanup(encoder);
-
-       kfree(nv_encoder);
-}
-
-static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
-       .destroy = nv50_sor_destroy,
-};
-
-int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
-{
-       struct nouveau_encoder *nv_encoder = NULL;
-       struct drm_device *dev = connector->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct drm_encoder *encoder;
-       int type;
-
-       NV_DEBUG(drm, "\n");
-
-       switch (entry->type) {
-       case DCB_OUTPUT_TMDS:
-       case DCB_OUTPUT_DP:
-               type = DRM_MODE_ENCODER_TMDS;
-               break;
-       case DCB_OUTPUT_LVDS:
-               type = DRM_MODE_ENCODER_LVDS;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       encoder = to_drm_encoder(nv_encoder);
-
-       nv_encoder->dcb = entry;
-       nv_encoder->or = ffs(entry->or) - 1;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-       drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
-       drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
-
-       encoder->possible_crtcs = entry->heads;
-       encoder->possible_clones = 0;
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
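
Of the code removed above, nv50_sor_dp_calc_tu() is the densest piece: it brute-forces a hardware encoding (a timeslot-unit size TU plus an integer/fractional VTUi/VTUf/VTUa split) of the ratio between packed data rate and link symbol rate, keeping the candidate with the smallest error. A condensed, self-contained sketch of the search shape follows; the fixed-point scale and ranges mirror the original, but the >= SYMBOL/2 branch and the VTUa term are simplified away, and the input ratio is an arbitrary example:

#include <stdio.h>
#include <stdint.h>

#define SYMBOL 100000u          /* fixed-point scale, as in the original */

int main(void)
{
        uint32_t link_ratio = 37431;    /* example: data rate / symbol rate, scaled */
        uint32_t best_diff = 64 * SYMBOL;
        uint32_t best_tu = 0, best_vtui = 0, best_vtuf = 0;
        uint32_t tu;

        for (tu = 64; tu >= 32; tu--) {
                uint32_t valid = link_ratio * tu;   /* valid symbols per TU, scaled */
                uint32_t vtui = valid / SYMBOL;     /* integer part */
                uint32_t calc = vtui * SYMBOL;
                uint32_t diff = valid - calc;
                uint32_t vtuf = 1;

                if (diff) {
                        /* approximate the remainder as SYMBOL / vtuf, with
                         * the fractional divider clamped to 4 bits as in
                         * the hardware encoding
                         */
                        vtuf = SYMBOL / diff;
                        if (vtuf > 15)
                                vtuf = 15;
                        calc += SYMBOL / vtuf;
                        diff = calc > valid ? calc - valid : valid - calc;
                }

                if (diff < best_diff) {
                        best_diff = diff;
                        best_tu = tu;
                        best_vtui = vtui;
                        best_vtuf = vtuf;
                        if (diff == 0)
                                break;
                }
        }

        printf("TU=%u VTUi=%u VTUf=%u err=%u\n",
               best_tu, best_vtui, best_vtuf, best_diff);
        return 0;
}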
index 53299ea..2a56b1b 100644
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
        struct nvc0_fence_chan *fctx = chan->fence;
        int i;
 
-       if (nv_device(chan->drm->device)->card_type >= NV_D0) {
-               for (i = 0; i < dev->mode_config.num_crtc; i++) {
-                       struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-                       nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-               }
-       } else
-       if (nv_device(chan->drm->device)->card_type >= NV_50) {
-               for (i = 0; i < dev->mode_config.num_crtc; i++) {
-                       struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-                       nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-               }
+       for (i = 0; i < dev->mode_config.num_crtc; i++) {
+               struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+               nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
        }
 
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
 
        /* map display semaphore buffers into channel's vm */
        for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-               struct nouveau_bo *bo;
-               if (nv_device(chan->drm->device)->card_type >= NV_D0)
-                       bo = nvd0_display_crtc_sema(chan->drm->dev, i);
-               else
-                       bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+               struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
                ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
        }
 
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
 {
        struct nvc0_fence_priv *priv = drm->fence;
        nouveau_bo_unmap(priv->bo);
+       if (priv->bo)
+               nouveau_bo_unpin(priv->bo);
        nouveau_bo_ref(NULL, &priv->bo);
        drm->fence = NULL;
        kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
        if (ret == 0) {
                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-               if (ret == 0)
+               if (ret == 0) {
                        ret = nouveau_bo_map(priv->bo);
+                       if (ret)
+                               nouveau_bo_unpin(priv->bo);
+               }
                if (ret)
                        nouveau_bo_ref(NULL, &priv->bo);
        }
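
A small idiom from the nvc0_fence hunks above: the mapping loop in nvc0_fence_context_new() iterates with `!ret && i < n`, so it stops at the first failure while the error code survives the loop. Standalone illustration with a hypothetical step():

#include <stdio.h>

static int step(int i)
{
        return i == 2 ? -5 : 0;         /* simulate a failure on the third item */
}

int main(void)
{
        int ret = 0, i;

        for (i = 0; !ret && i < 4; i++)
                ret = step(i);

        printf("stopped at i=%d, ret=%d\n", i, ret);    /* i=3, ret=-5 */
        return 0;
}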
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca..0000000
+++ /dev/null
@@ -1,2141 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
-#include "nouveau_gem.h"
-#include "nouveau_connector.h"
-#include "nouveau_encoder.h"
-#include "nouveau_crtc.h"
-#include "nouveau_fence.h"
-#include "nv50_display.h"
-
-#include <core/gpuobj.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-
-#define EVO_DMA_NR 9
-
-#define EVO_MASTER  (0x00)
-#define EVO_FLIP(c) (0x01 + (c))
-#define EVO_OVLY(c) (0x05 + (c))
-#define EVO_OIMM(c) (0x09 + (c))
-#define EVO_CURS(c) (0x0d + (c))
-
-/* offsets in shared sync bo of various structures */
-#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
-
-struct evo {
-       int idx;
-       dma_addr_t handle;
-       u32 *ptr;
-       struct {
-               u32 offset;
-               u16 value;
-       } sem;
-};
-
-struct nvd0_display {
-       struct nouveau_gpuobj *mem;
-       struct nouveau_bo *sync;
-       struct evo evo[9];
-
-       struct tasklet_struct tasklet;
-       u32 modeset;
-};
-
-static struct nvd0_display *
-nvd0_display(struct drm_device *dev)
-{
-       return nouveau_display(dev)->priv;
-}
-
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
-{
-       return nouveau_encoder(encoder)->crtc;
-}
-
-/******************************************************************************
- * EVO channel helpers
- *****************************************************************************/
-static inline int
-evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       int ret = 0;
-       nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
-       nv_wr32(device, 0x610704 + (id * 0x10), data);
-       nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
-       if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
-               ret = -EBUSY;
-       nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
-       return ret;
-}
-
-static u32 *
-evo_wait(struct drm_device *dev, int id, int nr)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
-
-       if (put + nr >= (PAGE_SIZE / 4)) {
-               disp->evo[id].ptr[put] = 0x20000000;
-
-               nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
-               if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
-                       NV_ERROR(drm, "evo %d dma stalled\n", id);
-                       return NULL;
-               }
-
-               put = 0;
-       }
-
-       return disp->evo[id].ptr + put;
-}
-
-static void
-evo_kick(u32 *push, struct drm_device *dev, int id)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-
-       nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
-}
-
-#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
-#define evo_data(p,d)   *((p)++) = (d)
-
-static int
-evo_init_dma(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 flags;
-
-       flags = 0x00000000;
-       if (ch == EVO_MASTER)
-               flags |= 0x01000000;
-
-       nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
-       nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
-       nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-       nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
-       nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
-       if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
-               NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-                             nv_rd32(device, 0x610490 + (ch * 0x0010)));
-               return -EBUSY;
-       }
-
-       nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-       nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-       return 0;
-}
-
-static void
-evo_fini_dma(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-
-       if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
-               return;
-
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
-       nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
-       nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-       nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static inline void
-evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
-}
-
-static int
-evo_init_pio(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
-       if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
-               NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
-                             nv_rd32(device, 0x610490 + (ch * 0x0010)));
-               return -EBUSY;
-       }
-
-       nv_mask(device, 0x610090, (1 << ch), (1 << ch));
-       nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
-       return 0;
-}
-
-static void
-evo_fini_pio(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-
-       if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
-               return;
-
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-       nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
-       nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
-       nv_mask(device, 0x610090, (1 << ch), 0x00000000);
-       nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
-}
-
-static bool
-evo_sync_wait(void *data)
-{
-       return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
-}
-
-static int
-evo_sync(struct drm_device *dev, int ch)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 *push = evo_wait(dev, ch, 8);
-       if (push) {
-               nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x80000000 | EVO_MAST_NTFY);
-               evo_mthd(push, 0x0080, 2);
-               evo_data(push, 0x00000000);
-               evo_data(push, 0x00000000);
-               evo_kick(push, dev, ch);
-               if (nv_wait_cb(device, evo_sync_wait, disp->sync))
-                       return 0;
-       }
-
-       return -EBUSY;
-}
-
-/******************************************************************************
- * Page flipping channel
- *****************************************************************************/
-struct nouveau_bo *
-nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
-{
-       return nvd0_display(dev)->sync;
-}
-
-void
-nvd0_display_flip_stop(struct drm_crtc *crtc)
-{
-       struct nvd0_display *disp = nvd0_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-       u32 *push;
-
-       push = evo_wait(crtc->dev, evo->idx, 8);
-       if (push) {
-               evo_mthd(push, 0x0084, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0094, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x00c0, 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0080, 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, evo->idx);
-       }
-}
-
-int
-nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                      struct nouveau_channel *chan, u32 swap_interval)
-{
-       struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-       struct nvd0_display *disp = nvd0_display(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
-       u64 offset;
-       u32 *push;
-       int ret;
-
-       swap_interval <<= 4;
-       if (swap_interval == 0)
-               swap_interval |= 0x100;
-
-       push = evo_wait(crtc->dev, evo->idx, 128);
-       if (unlikely(push == NULL))
-               return -EBUSY;
-
-       /* synchronise with the rendering channel, if necessary */
-       if (likely(chan)) {
-               ret = RING_SPACE(chan, 10);
-               if (ret)
-                       return ret;
-
-
-               offset  = nvc0_fence_crtc(chan, nv_crtc->index);
-               offset += evo->sem.offset;
-
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(offset));
-               OUT_RING  (chan, lower_32_bits(offset));
-               OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
-               OUT_RING  (chan, 0x1002);
-               BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-               OUT_RING  (chan, upper_32_bits(offset));
-               OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-               OUT_RING  (chan, 0x74b1e000);
-               OUT_RING  (chan, 0x1001);
-               FIRE_RING (chan);
-       } else {
-               nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
-                               0xf00d0000 | evo->sem.value);
-               evo_sync(crtc->dev, EVO_MASTER);
-       }
-
-       /* queue the flip */
-       evo_mthd(push, 0x0100, 1);
-       evo_data(push, 0xfffe0000);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, swap_interval);
-       if (!(swap_interval & 0x00000100)) {
-               evo_mthd(push, 0x00e0, 1);
-               evo_data(push, 0x40000000);
-       }
-       evo_mthd(push, 0x0088, 4);
-       evo_data(push, evo->sem.offset);
-       evo_data(push, 0xf00d0000 | evo->sem.value);
-       evo_data(push, 0x74b1e000);
-       evo_data(push, NvEvoSync);
-       evo_mthd(push, 0x00a0, 2);
-       evo_data(push, 0x00000000);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x00c0, 1);
-       evo_data(push, nv_fb->r_dma);
-       evo_mthd(push, 0x0110, 2);
-       evo_data(push, 0x00000000);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x0400, 5);
-       evo_data(push, nv_fb->nvbo->bo.offset >> 8);
-       evo_data(push, 0);
-       evo_data(push, (fb->height << 16) | fb->width);
-       evo_data(push, nv_fb->r_pitch);
-       evo_data(push, nv_fb->r_format);
-       evo_mthd(push, 0x0080, 1);
-       evo_data(push, 0x00000000);
-       evo_kick(push, crtc->dev, evo->idx);
-
-       evo->sem.offset ^= 0x10;
-       evo->sem.value++;
-       return 0;
-}
-
-/******************************************************************************
- * CRTC
- *****************************************************************************/
-static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct nouveau_connector *nv_connector;
-       struct drm_connector *connector;
-       u32 *push, mode = 0x00;
-       u32 mthd;
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       connector = &nv_connector->base;
-       if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
-               if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
-                       mode = DITHERING_MODE_DYNAMIC2X2;
-       } else {
-               mode = nv_connector->dithering_mode;
-       }
-
-       if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
-               if (connector->display_info.bpc >= 8)
-                       mode |= DITHERING_DEPTH_8BPC;
-       } else {
-               mode |= nv_connector->dithering_depth;
-       }
-
-       if (nv_device(drm->device)->card_type < NV_E0)
-               mthd = 0x0490 + (nv_crtc->index * 0x0300);
-       else
-               mthd = 0x04a0 + (nv_crtc->index * 0x0300);
-
-       push = evo_wait(dev, EVO_MASTER, 4);
-       if (push) {
-               evo_mthd(push, mthd, 1);
-               evo_data(push, mode);
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-               evo_kick(push, dev, EVO_MASTER);
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
-{
-       struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
-       struct drm_device *dev = nv_crtc->base.dev;
-       struct drm_crtc *crtc = &nv_crtc->base;
-       struct nouveau_connector *nv_connector;
-       int mode = DRM_MODE_SCALE_NONE;
-       u32 oX, oY, *push;
-
-       /* start off at the resolution we programmed the crtc for; this
-        * effectively handles NONE/FULL scaling
-        */
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       if (nv_connector && nv_connector->native_mode)
-               mode = nv_connector->scaling_mode;
-
-       if (mode != DRM_MODE_SCALE_NONE)
-               omode = nv_connector->native_mode;
-       else
-               omode = umode;
-
-       oX = omode->hdisplay;
-       oY = omode->vdisplay;
-       if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
-               oY *= 2;
-
-       /* add overscan compensation if necessary; this will keep the
-        * aspect ratio the same as the backend mode unless overridden by
-        * the user setting both hborder and vborder properties.
-        */
-       if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
-                            (nv_connector->underscan == UNDERSCAN_AUTO &&
-                             nv_connector->edid &&
-                             drm_detect_hdmi_monitor(nv_connector->edid)))) {
-               u32 bX = nv_connector->underscan_hborder;
-               u32 bY = nv_connector->underscan_vborder;
-               u32 aspect = (oY << 19) / oX;
-
-               if (bX) {
-                       oX -= (bX * 2);
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       oX -= (oX >> 4) + 32;
-                       if (bY) oY -= (bY * 2);
-                       else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-       }
-
-       /* handle CENTER/ASPECT scaling, taking into account the areas
-        * removed already for overscan compensation
-        */
-       switch (mode) {
-       case DRM_MODE_SCALE_CENTER:
-               oX = min((u32)umode->hdisplay, oX);
-               oY = min((u32)umode->vdisplay, oY);
-               /* fall-through */
-       case DRM_MODE_SCALE_ASPECT:
-               if (oY < oX) {
-                       u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
-                       oX = ((oY * aspect) + (aspect / 2)) >> 19;
-               } else {
-                       u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
-                       oY = ((oX * aspect) + (aspect / 2)) >> 19;
-               }
-               break;
-       default:
-               break;
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, (oY << 16) | oX);
-               evo_data(push, (oY << 16) | oX);
-               evo_data(push, (oY << 16) | oX);
-               evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
-               evo_kick(push, dev, EVO_MASTER);
-               if (update) {
-                       nvd0_display_flip_stop(crtc);
-                       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-               }
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
-                   int x, int y, bool update)
-{
-       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
-       u32 *push;
-
-       push = evo_wait(fb->dev, EVO_MASTER, 16);
-       if (push) {
-               evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, nvfb->nvbo->bo.offset >> 8);
-               evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
-               evo_data(push, (fb->height << 16) | fb->width);
-               evo_data(push, nvfb->r_pitch);
-               evo_data(push, nvfb->r_format);
-               evo_data(push, nvfb->r_dma);
-               evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, (y << 16) | x);
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-               evo_kick(push, fb->dev, EVO_MASTER);
-       }
-
-       nv_crtc->fb.tile_flags = nvfb->r_dma;
-       return 0;
-}
-
-static void
-nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
-{
-       struct drm_device *dev = nv_crtc->base.dev;
-       u32 *push = evo_wait(dev, EVO_MASTER, 16);
-       if (push) {
-               if (show) {
-                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
-                       evo_data(push, 0x85000000);
-                       evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
-                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, NvEvoVRAM);
-               } else {
-                       evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, 0x05000000);
-                       evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
-                       evo_data(push, 0x00000000);
-               }
-
-               if (update) {
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-               }
-
-               evo_kick(push, dev, EVO_MASTER);
-       }
-}
-
-static void
-nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-}
-
-static void
-nvd0_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 *push;
-
-       nvd0_display_flip_stop(crtc);
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 2);
-       if (push) {
-               evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x03000000);
-               evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nvd0_crtc_cursor_show(nv_crtc, false, false);
-}
-
-static void
-nvd0_crtc_commit(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 *push;
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 32);
-       if (push) {
-               evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, nv_crtc->fb.tile_flags);
-               evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
-               evo_data(push, 0x83000000);
-               evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
-               evo_data(push, 0x00000000);
-               evo_data(push, 0x00000000);
-               evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, NvEvoVRAM);
-               evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0xffffff00);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
-       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-}
-
-static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
-                    struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static int
-nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
-{
-       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
-       int ret;
-
-       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
-       if (ret)
-               return ret;
-
-       if (old_fb) {
-               nvfb = nouveau_framebuffer(old_fb);
-               nouveau_bo_unpin(nvfb->nvbo);
-       }
-
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
-                  struct drm_display_mode *mode, int x, int y,
-                  struct drm_framebuffer *old_fb)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct nouveau_connector *nv_connector;
-       u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
-       u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
-       u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
-       u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
-       u32 vblan2e = 0, vblan2s = 1;
-       u32 *push;
-       int ret;
-
-       hactive = mode->htotal;
-       hsynce  = mode->hsync_end - mode->hsync_start - 1;
-       hbackp  = mode->htotal - mode->hsync_end;
-       hblanke = hsynce + hbackp;
-       hfrontp = mode->hsync_start - mode->hdisplay;
-       hblanks = mode->htotal - hfrontp - 1;
-
-       vactive = mode->vtotal * vscan / ilace;
-       vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
-       vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
-       vblanke = vsynce + vbackp;
-       vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
-       vblanks = vactive - vfrontp - 1;
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
-               vblan2e = vactive + vsynce + vbackp;
-               vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
-               vactive = (vactive * 2) + 1;
-       }
-
-       ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-       if (ret)
-               return ret;
-
-       push = evo_wait(crtc->dev, EVO_MASTER, 64);
-       if (push) {
-               evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
-               evo_data(push, 0x00000000);
-               evo_data(push, (vactive << 16) | hactive);
-               evo_data(push, ( vsynce << 16) | hsynce);
-               evo_data(push, (vblanke << 16) | hblanke);
-               evo_data(push, (vblanks << 16) | hblanks);
-               evo_data(push, (vblan2e << 16) | vblan2s);
-               evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
-               evo_data(push, 0x00000000); /* ??? */
-               evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
-               evo_data(push, mode->clock * 1000);
-               evo_data(push, 0x00200000); /* ??? */
-               evo_data(push, mode->clock * 1000);
-               evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, 0x00000311);
-               evo_data(push, 0x00000100);
-               evo_kick(push, crtc->dev, EVO_MASTER);
-       }
-
-       nv_connector = nouveau_crtc_connector_get(nv_crtc);
-       nvd0_crtc_set_dither(nv_crtc, false);
-       nvd0_crtc_set_scale(nv_crtc, false);
-       nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                       struct drm_framebuffer *old_fb)
-{
-       struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       int ret;
-
-       if (!crtc->fb) {
-               NV_DEBUG(drm, "No FB bound\n");
-               return 0;
-       }
-
-       ret = nvd0_crtc_swap_fbs(crtc, old_fb);
-       if (ret)
-               return ret;
-
-       nvd0_display_flip_stop(crtc);
-       nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
-       nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
-       return 0;
-}
-
-static int
-nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb, int x, int y,
-                              enum mode_set_atomic state)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       nvd0_display_flip_stop(crtc);
-       nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
-       return 0;
-}
-
-static void
-nvd0_crtc_lut_load(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
-       int i;
-
-       for (i = 0; i < 256; i++) {
-               writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
-               writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
-               writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
-       }
-}
-
-static int
-nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
-                    uint32_t handle, uint32_t width, uint32_t height)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct drm_gem_object *gem;
-       struct nouveau_bo *nvbo;
-       bool visible = (handle != 0);
-       int i, ret = 0;
-
-       if (visible) {
-               if (width != 64 || height != 64)
-                       return -EINVAL;
-
-               gem = drm_gem_object_lookup(dev, file_priv, handle);
-               if (unlikely(!gem))
-                       return -ENOENT;
-               nvbo = nouveau_gem_object(gem);
-
-               ret = nouveau_bo_map(nvbo);
-               if (ret == 0) {
-                       for (i = 0; i < 64 * 64; i++) {
-                               u32 v = nouveau_bo_rd32(nvbo, i);
-                               nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
-                       }
-                       nouveau_bo_unmap(nvbo);
-               }
-
-               drm_gem_object_unreference_unlocked(gem);
-       }
-
-       if (visible != nv_crtc->cursor.visible) {
-               nvd0_crtc_cursor_show(nv_crtc, visible, true);
-               nv_crtc->cursor.visible = visible;
-       }
-
-       return ret;
-}
-
-static int
-nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       int ch = EVO_CURS(nv_crtc->index);
-
-       evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
-       evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
-       return 0;
-}
-
-static void
-nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                   uint32_t start, uint32_t size)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 end = max(start + size, (u32)256);
-       u32 i;
-
-       for (i = start; i < end; i++) {
-               nv_crtc->lut.r[i] = r[i];
-               nv_crtc->lut.g[i] = g[i];
-               nv_crtc->lut.b[i] = b[i];
-       }
-
-       nvd0_crtc_lut_load(crtc);
-}
-
-static void
-nvd0_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       nouveau_bo_unmap(nv_crtc->lut.nvbo);
-       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       drm_crtc_cleanup(crtc);
-       kfree(crtc);
-}
-
-static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
-       .dpms = nvd0_crtc_dpms,
-       .prepare = nvd0_crtc_prepare,
-       .commit = nvd0_crtc_commit,
-       .mode_fixup = nvd0_crtc_mode_fixup,
-       .mode_set = nvd0_crtc_mode_set,
-       .mode_set_base = nvd0_crtc_mode_set_base,
-       .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
-       .load_lut = nvd0_crtc_lut_load,
-};
-
-static const struct drm_crtc_funcs nvd0_crtc_func = {
-       .cursor_set = nvd0_crtc_cursor_set,
-       .cursor_move = nvd0_crtc_cursor_move,
-       .gamma_set = nvd0_crtc_gamma_set,
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = nvd0_crtc_destroy,
-       .page_flip = nouveau_crtc_page_flip,
-};
-
-static void
-nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
-static int
-nvd0_crtc_create(struct drm_device *dev, int index)
-{
-       struct nouveau_crtc *nv_crtc;
-       struct drm_crtc *crtc;
-       int ret, i;
-
-       nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
-       if (!nv_crtc)
-               return -ENOMEM;
-
-       nv_crtc->index = index;
-       nv_crtc->set_dither = nvd0_crtc_set_dither;
-       nv_crtc->set_scale = nvd0_crtc_set_scale;
-       nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
-       nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
-       for (i = 0; i < 256; i++) {
-               nv_crtc->lut.r[i] = i << 8;
-               nv_crtc->lut.g[i] = i << 8;
-               nv_crtc->lut.b[i] = i << 8;
-       }
-
-       crtc = &nv_crtc->base;
-       drm_crtc_init(dev, crtc, &nvd0_crtc_func);
-       drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
-       drm_mode_crtc_set_gamma_size(crtc, 256);
-
-       ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &nv_crtc->lut.nvbo);
-       if (!ret) {
-               ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(nv_crtc->lut.nvbo);
-               if (ret)
-                       nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
-       }
-
-       if (ret)
-               goto out;
-
-       nvd0_crtc_lut_load(crtc);
-
-out:
-       if (ret)
-               nvd0_crtc_destroy(crtc);
-       return ret;
-}
-
-/******************************************************************************
- * DAC
- *****************************************************************************/
-static void
-nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or;
-       u32 dpms_ctrl;
-
-       dpms_ctrl = 0x80000000;
-       if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
-               dpms_ctrl |= 0x00000001;
-       if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
-               dpms_ctrl |= 0x00000004;
-
-       nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
-       nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-}
-
-static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (nv_connector && nv_connector->native_mode) {
-               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-                       int id = adjusted_mode->base.id;
-                       *adjusted_mode = *nv_connector->native_mode;
-                       adjusted_mode->base.id = id;
-               }
-       }
-
-       return true;
-}
-
-static void
-nvd0_dac_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
-                 struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       u32 syncs, magic, *push;
-
-       syncs = 0x00000001;
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               syncs |= 0x00000008;
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               syncs |= 0x00000010;
-
-       magic = 0x31ec6000 | (nv_crtc->index << 25);
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               magic |= 0x00000001;
-
-       nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       push = evo_wait(encoder->dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, syncs);
-               evo_data(push, magic);
-               evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
-               evo_data(push, 1 << nv_crtc->index);
-               evo_data(push, 0x00ff);
-               evo_kick(push, encoder->dev, EVO_MASTER);
-       }
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_dac_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       u32 *push;
-
-       if (nv_encoder->crtc) {
-               nvd0_crtc_prepare(nv_encoder->crtc);
-
-               push = evo_wait(dev, EVO_MASTER, 4);
-               if (push) {
-                       evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-                       evo_kick(push, dev, EVO_MASTER);
-               }
-
-               nv_encoder->crtc = NULL;
-       }
-}
-
-static enum drm_connector_status
-nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-       enum drm_connector_status status = connector_status_disconnected;
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or;
-       u32 load;
-
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
-       udelay(9500);
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
-
-       load = nv_rd32(device, 0x61a00c + (or * 0x800));
-       if ((load & 0x38000000) == 0x38000000)
-               status = connector_status_connected;
-
-       nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
-       return status;
-}
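/*
 * DAC load-sense sketch: writing 0x00100000 and then 0x80000000 to the
 * per-OR register 0x61a00c presumably drives the sense voltage and starts
 * the measurement; after the ~9.5 ms settle delay, all three bits of
 * 0x38000000 reading back as set indicates a terminated (connected) load
 * on the R, G and B lines.
 */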
-
-static void
-nvd0_dac_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
-       .dpms = nvd0_dac_dpms,
-       .mode_fixup = nvd0_dac_mode_fixup,
-       .prepare = nvd0_dac_disconnect,
-       .commit = nvd0_dac_commit,
-       .mode_set = nvd0_dac_mode_set,
-       .disable = nvd0_dac_disconnect,
-       .get_crtc = nvd0_display_crtc_get,
-       .detect = nvd0_dac_detect
-};
-
-static const struct drm_encoder_funcs nvd0_dac_func = {
-       .destroy = nvd0_dac_destroy,
-};
-
-static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-       struct drm_device *dev = connector->dev;
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       nv_encoder->dcb = dcbe;
-       nv_encoder->or = ffs(dcbe->or) - 1;
-
-       encoder = to_drm_encoder(nv_encoder);
-       encoder->possible_crtcs = dcbe->heads;
-       encoder->possible_clones = 0;
-       drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
-       drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
-/******************************************************************************
- * Audio
- *****************************************************************************/
-static void
-nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int i, or = nv_encoder->or * 0x30;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_monitor_audio(nv_connector->edid))
-               return;
-
-       nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
-
-       drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
-       if (nv_connector->base.eld[0]) {
-               u8 *eld = nv_connector->base.eld;
-
-               for (i = 0; i < eld[2] * 4; i++)
-                       nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
-               for (i = eld[2] * 4; i < 0x60; i++)
-                       nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
-
-               nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
-       }
-}
-
-static void
-nvd0_audio_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int or = nv_encoder->or * 0x30;
-
-       nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
-}
-
-/******************************************************************************
- * HDMI
- *****************************************************************************/
-static void
-nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int head = nv_crtc->index * 0x800;
-       u32 rekey = 56; /* binary driver, and tegra constant */
-       u32 max_ac_packet;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (!drm_detect_hdmi_monitor(nv_connector->edid))
-               return;
-
-       max_ac_packet  = mode->htotal - mode->hdisplay;
-       max_ac_packet -= rekey;
-       max_ac_packet -= 18; /* constant from tegra */
-       max_ac_packet /= 32;
-
-       /* AVI InfoFrame */
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-       nv_wr32(device, 0x61671c + head, 0x000d0282);
-       nv_wr32(device, 0x616720 + head, 0x0000006f);
-       nv_wr32(device, 0x616724 + head, 0x00000000);
-       nv_wr32(device, 0x616728 + head, 0x00000000);
-       nv_wr32(device, 0x61672c + head, 0x00000000);
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
-
-       /* ??? InfoFrame? */
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-       nv_wr32(device, 0x6167ac + head, 0x00000010);
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
-
-       /* HDMI_CTRL */
-       nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
-                                                 max_ac_packet << 16);
-
-       /* NFI, audio doesn't work without it though.. */
-       nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
-
-       nvd0_audio_mode_set(encoder, mode);
-}
-
-static void
-nvd0_hdmi_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       int head = nv_crtc->index * 0x800;
-
-       nvd0_audio_disconnect(encoder);
-
-       nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
-       nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
-       nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
-}
-
-/******************************************************************************
- * SOR
- *****************************************************************************/
-static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
-{
-       static const u8 nvd0[] = { 16, 8, 0, 24 };
-       return nvd0[lane];
-}
-
-static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
-}
-
-static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
-                     u8 lane, u8 swing, u8 preem)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
-       u32 mask = 0x000000ff << shift;
-       u8 *table, *entry, *config = NULL;
-
-       switch (swing) {
-       case 0: preem += 0; break;
-       case 1: preem += 4; break;
-       case 2: preem += 7; break;
-       case 3: preem += 9; break;
-       }
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (table) {
-               if (table[0] == 0x30) {
-                       config  = entry + table[4];
-                       config += table[5] * preem;
-               } else
-               if (table[0] == 0x40) {
-                       config  = table + table[1];
-                       config += table[2] * table[3];
-                       config += table[6] * preem;
-               }
-       }
-
-       if (!config) {
-               NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
-               return;
-       }
-
-       nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
-       nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
-       nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
-       nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
-}
-
-static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
-                    int link_nr, u32 link_bw, bool enhframe)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       const u32 soff = (or * 0x800);
-       u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
-       u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
-       u32 script = 0x0000, lane_mask = 0;
-       u8 *table, *entry;
-       int i;
-
-       link_bw /= 27000;
-
-       table = nouveau_dp_bios_data(dev, dcb, &entry);
-       if (table) {
-               if      (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
-               else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
-               else                       entry = NULL;
-
-               while (entry) {
-                       if (entry[0] >= link_bw)
-                               break;
-                       entry += 3;
-               }
-
-               nouveau_bios_run_init_table(dev, script, dcb, crtc);
-       }
-
-       clksor |= link_bw << 18;
-       dpctrl |= ((1 << link_nr) - 1) << 16;
-       if (enhframe)
-               dpctrl |= 0x00004000;
-
-       for (i = 0; i < link_nr; i++)
-               lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
-
-       nv_wr32(device, 0x612300 + soff, clksor);
-       nv_wr32(device, 0x61c10c + loff, dpctrl);
-       nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
-}
-
-static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
-                    u32 *link_nr, u32 *link_bw)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-       const u32 loff = (or * 0x800) + (link * 0x80);
-       const u32 soff = (or * 0x800);
-       u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
-       u32 clksor = nv_rd32(device, 0x612300 + soff);
-
-       if      (dpctrl > 0x00030000) *link_nr = 4;
-       else if (dpctrl > 0x00010000) *link_nr = 2;
-       else                          *link_nr = 1;
-
-       *link_bw  = (clksor & 0x007c0000) >> 18;
-       *link_bw *= 27000;
-}
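/*
 * Read-back of the trained link: dpctrl bits 16-19 encode the enabled
 * lane mask (1, 2 or 4 lanes), and the clksor field appears to hold the
 * DPCD-style link-rate code (0x06 for 1.62 Gbps, 0x0a for 2.7 Gbps),
 * which is why the driver converts with "* 27000" here and "/ 27000" in
 * nvd0_sor_dp_link_set().
 */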
-
-static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
-                   u32 crtc, u32 datarate)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       const u32 symbol = 100000;
-       const u32 TU = 64;
-       u32 link_nr, link_bw;
-       u64 ratio, value;
-
-       nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
-
-       ratio  = datarate;
-       ratio *= symbol;
-       do_div(ratio, link_nr * link_bw);
-
-       value  = (symbol - ratio) * TU;
-       value *= ratio;
-       do_div(value, symbol);
-       do_div(value, symbol);
-
-       value += 5;
-       value |= 0x08000000;
-
-       nv_wr32(device, 0x616610 + (crtc * 0x800), value);
-}
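/*
 * TU arithmetic, roughly: with a 64-symbol transfer unit, the fraction f
 * of each TU that must carry pixel data is datarate / (link_nr * link_bw).
 * "ratio" is f scaled by the constant 100000, so after the two divisions
 * value = TU * f * (1 - f); the +5 bias and the 0x08000000 enable bit are
 * then folded in before programming the per-head register 0x616610.
 */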
-
-static void
-nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct drm_encoder *partner;
-       int or = nv_encoder->or;
-       u32 dpms_ctrl;
-
-       nv_encoder->last_dpms = mode;
-
-       list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
-               struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
-
-               if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
-                       continue;
-
-               if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->dcb->or) {
-                       if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
-                               return;
-                       break;
-               }
-       }
-
-       dpms_ctrl  = (mode == DRM_MODE_DPMS_ON);
-       dpms_ctrl |= 0x80000000;
-
-       nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
-       nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-       nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               struct dp_train_func func = {
-                       .link_set = nvd0_sor_dp_link_set,
-                       .train_set = nvd0_sor_dp_train_set,
-                       .train_adj = nvd0_sor_dp_train_adj
-               };
-
-               nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
-       }
-}
-
-static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder,
-                   const struct drm_display_mode *mode,
-                   struct drm_display_mode *adjusted_mode)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_connector *nv_connector;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       if (nv_connector && nv_connector->native_mode) {
-               if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
-                       int id = adjusted_mode->base.id;
-                       *adjusted_mode = *nv_connector->native_mode;
-                       adjusted_mode->base.id = id;
-               }
-       }
-
-       return true;
-}
-
-static void
-nvd0_sor_disconnect(struct drm_encoder *encoder)
-{
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct drm_device *dev = encoder->dev;
-       u32 *push;
-
-       if (nv_encoder->crtc) {
-               nvd0_crtc_prepare(nv_encoder->crtc);
-
-               push = evo_wait(dev, EVO_MASTER, 4);
-               if (push) {
-                       evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
-                       evo_data(push, 0x00000000);
-                       evo_mthd(push, 0x0080, 1);
-                       evo_data(push, 0x00000000);
-                       evo_kick(push, dev, EVO_MASTER);
-               }
-
-               nvd0_hdmi_disconnect(encoder);
-
-               nv_encoder->crtc = NULL;
-               nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-       }
-}
-
-static void
-nvd0_sor_prepare(struct drm_encoder *encoder)
-{
-       nvd0_sor_disconnect(encoder);
-       if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
-               evo_sync(encoder->dev, EVO_MASTER);
-}
-
-static void
-nvd0_sor_commit(struct drm_encoder *encoder)
-{
-}
-
-static void
-nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
-                 struct drm_display_mode *mode)
-{
-       struct drm_device *dev = encoder->dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-       struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-       struct nouveau_connector *nv_connector;
-       struct nvbios *bios = &drm->vbios;
-       u32 mode_ctrl = (1 << nv_crtc->index);
-       u32 syncs, magic, *push;
-       u32 or_config;
-
-       syncs = 0x00000001;
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               syncs |= 0x00000008;
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               syncs |= 0x00000010;
-
-       magic = 0x31ec6000 | (nv_crtc->index << 25);
-       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-               magic |= 0x00000001;
-
-       nv_connector = nouveau_encoder_connector_get(nv_encoder);
-       switch (nv_encoder->dcb->type) {
-       case DCB_OUTPUT_TMDS:
-               if (nv_encoder->dcb->sorconf.link & 1) {
-                       if (mode->clock < 165000)
-                               mode_ctrl |= 0x00000100;
-                       else
-                               mode_ctrl |= 0x00000500;
-               } else {
-                       mode_ctrl |= 0x00000200;
-               }
-
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               if (mode->clock >= 165000)
-                       or_config |= 0x0100;
-
-               nvd0_hdmi_mode_set(encoder, mode);
-               break;
-       case DCB_OUTPUT_LVDS:
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               if (bios->fp_no_ddc) {
-                       if (bios->fp.dual_link)
-                               or_config |= 0x0100;
-                       if (bios->fp.if_is_24bit)
-                               or_config |= 0x0200;
-               } else {
-                       if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-                               if (((u8 *)nv_connector->edid)[121] == 2)
-                                       or_config |= 0x0100;
-                       } else
-                       if (mode->clock >= bios->fp.duallink_transition_clk) {
-                               or_config |= 0x0100;
-                       }
-
-                       if (or_config & 0x0100) {
-                               if (bios->fp.strapless_is_24bit & 2)
-                                       or_config |= 0x0200;
-                       } else {
-                               if (bios->fp.strapless_is_24bit & 1)
-                                       or_config |= 0x0200;
-                       }
-
-                       if (nv_connector->base.display_info.bpc == 8)
-                               or_config |= 0x0200;
-
-               }
-               break;
-       case DCB_OUTPUT_DP:
-               if (nv_connector->base.display_info.bpc == 6) {
-                       nv_encoder->dp.datarate = mode->clock * 18 / 8;
-                       syncs |= 0x00000002 << 6;
-               } else {
-                       nv_encoder->dp.datarate = mode->clock * 24 / 8;
-                       syncs |= 0x00000005 << 6;
-               }
-
-               if (nv_encoder->dcb->sorconf.link & 1)
-                       mode_ctrl |= 0x00000800;
-               else
-                       mode_ctrl |= 0x00000900;
-
-               or_config = (mode_ctrl & 0x00000f00) >> 8;
-               break;
-       default:
-               BUG_ON(1);
-               break;
-       }
-
-       nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
-
-       if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
-                                        nv_encoder->dp.datarate);
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 8);
-       if (push) {
-               evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
-               evo_data(push, syncs);
-               evo_data(push, magic);
-               evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
-               evo_data(push, mode_ctrl);
-               evo_data(push, or_config);
-               evo_kick(push, dev, EVO_MASTER);
-       }
-
-       nv_encoder->crtc = encoder->crtc;
-}
-
-static void
-nvd0_sor_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
-       .dpms = nvd0_sor_dpms,
-       .mode_fixup = nvd0_sor_mode_fixup,
-       .prepare = nvd0_sor_prepare,
-       .commit = nvd0_sor_commit,
-       .mode_set = nvd0_sor_mode_set,
-       .disable = nvd0_sor_disconnect,
-       .get_crtc = nvd0_display_crtc_get,
-};
-
-static const struct drm_encoder_funcs nvd0_sor_func = {
-       .destroy = nvd0_sor_destroy,
-};
-
-static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
-{
-       struct drm_device *dev = connector->dev;
-       struct nouveau_encoder *nv_encoder;
-       struct drm_encoder *encoder;
-
-       nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
-       if (!nv_encoder)
-               return -ENOMEM;
-       nv_encoder->dcb = dcbe;
-       nv_encoder->or = ffs(dcbe->or) - 1;
-       nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
-
-       encoder = to_drm_encoder(nv_encoder);
-       encoder->possible_crtcs = dcbe->heads;
-       encoder->possible_clones = 0;
-       drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
-       drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
-
-       drm_mode_connector_attach_encoder(connector, encoder);
-       return 0;
-}
-
-/******************************************************************************
- * IRQ
- *****************************************************************************/
-static struct dcb_output *
-lookup_dcb(struct drm_device *dev, int id, u32 mc)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       int type, or, i, link = -1;
-
-       if (id < 4) {
-               type = DCB_OUTPUT_ANALOG;
-               or   = id;
-       } else {
-               switch (mc & 0x00000f00) {
-               case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
-               case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
-               case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
-               case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
-               default:
-                       NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
-                       return NULL;
-               }
-
-               or = id - 4;
-       }
-
-       for (i = 0; i < drm->vbios.dcb.entries; i++) {
-               struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
-               if (dcb->type == type && (dcb->or & (1 << or)) &&
-                   (link < 0 || link == !(dcb->sorconf.link & 1)))
-                       return dcb;
-       }
-
-       NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
-       return NULL;
-}
-
-static void
-nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct dcb_output *dcb;
-       int i;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-               if (!(mcc & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcc);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct dcb_output *dcb;
-       u32 or, tmp, pclk;
-       int i;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
-               if (!(mcc & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcc);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
-       }
-
-       pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-       NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
-                         crtc, pclk, mask);
-       if (pclk && (mask & 0x00010000)) {
-               nv50_crtc_set_clock(dev, crtc, pclk);
-       }
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-               u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-               if (!(mcp & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcp);
-               if (!dcb)
-                       continue;
-               or = ffs(dcb->or) - 1;
-
-               nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
-
-               nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
-               switch (dcb->type) {
-               case DCB_OUTPUT_ANALOG:
-                       nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
-                       break;
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       if (cfg & 0x00000100)
-                               tmp = 0x00000101;
-                       else
-                               tmp = 0x00000000;
-
-                       nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
-                       break;
-               default:
-                       break;
-               }
-
-               break;
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct dcb_output *dcb;
-       int pclk, i;
-
-       pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
-
-       for (i = 0; mask && i < 8; i++) {
-               u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
-               u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
-               if (!(mcp & (1 << crtc)))
-                       continue;
-
-               dcb = lookup_dcb(dev, i, mcp);
-               if (!dcb)
-                       continue;
-
-               nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
-       }
-
-       nv_wr32(device, 0x6101d4, 0x00000000);
-       nv_wr32(device, 0x6109d4, 0x00000000);
-       nv_wr32(device, 0x6101d0, 0x80000000);
-}
-
-static void
-nvd0_display_bh(unsigned long data)
-{
-       struct drm_device *dev = (struct drm_device *)data;
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvd0_display *disp = nvd0_display(dev);
-       u32 mask = 0, crtc = ~0;
-       int i;
-
-       if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
-               NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
-               NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
-                        nv_rd32(device, 0x6101d0),
-                        nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
-               for (i = 0; i < 8; i++) {
-                       NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
-                               i < 4 ? "DAC" : "SOR", i,
-                               nv_rd32(device, 0x640180 + (i * 0x20)),
-                               nv_rd32(device, 0x660180 + (i * 0x20)));
-               }
-       }
-
-       while (!mask && ++crtc < dev->mode_config.num_crtc)
-               mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
-
-       if (disp->modeset & 0x00000001)
-               nvd0_display_unk1_handler(dev, crtc, mask);
-       if (disp->modeset & 0x00000002)
-               nvd0_display_unk2_handler(dev, crtc, mask);
-       if (disp->modeset & 0x00000004)
-               nvd0_display_unk4_handler(dev, crtc, mask);
-}
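/*
 * Bottom-half handshake: nvd0_display_intr() latches the supervisor state
 * bits from 0x6100ac into disp->modeset and schedules this tasklet; each
 * unk1/unk2/unk4 stage then scans the per-CRTC mask in 0x6101d4, runs the
 * relevant VBIOS scripts, and acks by writing 0x80000000 to 0x6101d0.
 */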
-
-void
-nvd0_display_intr(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       u32 intr = nv_rd32(device, 0x610088);
-
-       if (intr & 0x00000001) {
-               u32 stat = nv_rd32(device, 0x61008c);
-               nv_wr32(device, 0x61008c, stat);
-               intr &= ~0x00000001;
-       }
-
-       if (intr & 0x00000002) {
-               u32 stat = nv_rd32(device, 0x61009c);
-               int chid = ffs(stat) - 1;
-               if (chid >= 0) {
-                       u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
-                       u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
-                       u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
-
-                       NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
-                                    "0x%08x 0x%08x\n",
-                               chid, (mthd & 0x0000ffc), data, mthd, unkn);
-                       nv_wr32(device, 0x61009c, (1 << chid));
-                       nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
-               }
-
-               intr &= ~0x00000002;
-       }
-
-       if (intr & 0x00100000) {
-               u32 stat = nv_rd32(device, 0x6100ac);
-
-               if (stat & 0x00000007) {
-                       disp->modeset = stat;
-                       tasklet_schedule(&disp->tasklet);
-
-                       nv_wr32(device, 0x6100ac, (stat & 0x00000007));
-                       stat &= ~0x00000007;
-               }
-
-               if (stat) {
-                       NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
-                       nv_wr32(device, 0x6100ac, stat);
-               }
-
-               intr &= ~0x00100000;
-       }
-
-       intr &= ~0x0f000000; /* vblank, handled in core */
-       if (intr)
-               NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
-}
-
-/******************************************************************************
- * Init
- *****************************************************************************/
-void
-nvd0_display_fini(struct drm_device *dev)
-{
-       int i;
-
-       /* fini cursors + overlays + flips */
-       for (i = 1; i >= 0; i--) {
-               evo_fini_pio(dev, EVO_CURS(i));
-               evo_fini_pio(dev, EVO_OIMM(i));
-               evo_fini_dma(dev, EVO_OVLY(i));
-               evo_fini_dma(dev, EVO_FLIP(i));
-       }
-
-       /* fini master */
-       evo_fini_dma(dev, EVO_MASTER);
-}
-
-int
-nvd0_display_init(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       int ret, i;
-       u32 *push;
-
-       if (nv_rd32(device, 0x6100ac) & 0x00000100) {
-               nv_wr32(device, 0x6100ac, 0x00000100);
-               nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
-               if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
-                       NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
-                                nv_rd32(device, 0x6194e8));
-                       return -EBUSY;
-               }
-       }
-
-       /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
-        * work at all unless you do the SOR part below.
-        */
-       for (i = 0; i < 3; i++) {
-               u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
-               nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
-       }
-
-       for (i = 0; i < 4; i++) {
-               u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
-               nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
-       }
-
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
-               u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
-               u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
-               nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
-               nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
-               nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
-       }
-
-       /* point at our hash table / objects, enable interrupts */
-       nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
-       nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
-
-       /* init master */
-       ret = evo_init_dma(dev, EVO_MASTER);
-       if (ret)
-               goto error;
-
-       /* init flips + overlays + cursors */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
-                   (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
-                   (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
-                   (ret = evo_init_pio(dev, EVO_CURS(i))))
-                       goto error;
-       }
-
-       push = evo_wait(dev, EVO_MASTER, 32);
-       if (!push) {
-               ret = -EBUSY;
-               goto error;
-       }
-       evo_mthd(push, 0x0088, 1);
-       evo_data(push, NvEvoSync);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, 0x00000000);
-       evo_mthd(push, 0x0084, 1);
-       evo_data(push, 0x80000000);
-       evo_mthd(push, 0x008c, 1);
-       evo_data(push, 0x00000000);
-       evo_kick(push, dev, EVO_MASTER);
-
-error:
-       if (ret)
-               nvd0_display_fini(dev);
-       return ret;
-}
-
-void
-nvd0_display_destroy(struct drm_device *dev)
-{
-       struct nvd0_display *disp = nvd0_display(dev);
-       struct pci_dev *pdev = dev->pdev;
-       int i;
-
-       for (i = 0; i < EVO_DMA_NR; i++) {
-               struct evo *evo = &disp->evo[i];
-               pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
-       }
-
-       nouveau_gpuobj_ref(NULL, &disp->mem);
-       nouveau_bo_unmap(disp->sync);
-       nouveau_bo_ref(NULL, &disp->sync);
-
-       nouveau_display(dev)->priv = NULL;
-       kfree(disp);
-}
-
-int
-nvd0_display_create(struct drm_device *dev)
-{
-       struct nouveau_device *device = nouveau_dev(dev);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_bar *bar = nouveau_bar(device);
-       struct nouveau_fb *pfb = nouveau_fb(device);
-       struct dcb_table *dcb = &drm->vbios.dcb;
-       struct drm_connector *connector, *tmp;
-       struct pci_dev *pdev = dev->pdev;
-       struct nvd0_display *disp;
-       struct dcb_output *dcbe;
-       int crtcs, ret, i;
-
-       disp = kzalloc(sizeof(*disp), GFP_KERNEL);
-       if (!disp)
-               return -ENOMEM;
-
-       nouveau_display(dev)->priv = disp;
-       nouveau_display(dev)->dtor = nvd0_display_destroy;
-       nouveau_display(dev)->init = nvd0_display_init;
-       nouveau_display(dev)->fini = nvd0_display_fini;
-
-       /* create crtc objects to represent the hw heads */
-       crtcs = nv_rd32(device, 0x022448);
-       for (i = 0; i < crtcs; i++) {
-               ret = nvd0_crtc_create(dev, i);
-               if (ret)
-                       goto out;
-       }
-
-       /* create encoder/connector objects based on VBIOS DCB table */
-       for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
-               connector = nouveau_connector_create(dev, dcbe->connector);
-               if (IS_ERR(connector))
-                       continue;
-
-               if (dcbe->location != DCB_LOC_ON_CHIP) {
-                       NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
-                               dcbe->type, ffs(dcbe->or) - 1);
-                       continue;
-               }
-
-               switch (dcbe->type) {
-               case DCB_OUTPUT_TMDS:
-               case DCB_OUTPUT_LVDS:
-               case DCB_OUTPUT_DP:
-                       nvd0_sor_create(connector, dcbe);
-                       break;
-               case DCB_OUTPUT_ANALOG:
-                       nvd0_dac_create(connector, dcbe);
-                       break;
-               default:
-                       NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
-                               dcbe->type, ffs(dcbe->or) - 1);
-                       continue;
-               }
-       }
-
-       /* cull any connectors we created that don't have an encoder */
-       list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
-               if (connector->encoder_ids[0])
-                       continue;
-
-               NV_WARN(drm, "%s has no encoders, removing\n",
-                       drm_get_connector_name(connector));
-               connector->funcs->destroy(connector);
-       }
-
-       /* setup interrupt handling */
-       tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
-
-       /* small shared memory area we use for notifiers and semaphores */
-       ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
-                            0, 0x0000, NULL, &disp->sync);
-       if (!ret) {
-               ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
-               if (!ret)
-                       ret = nouveau_bo_map(disp->sync);
-               if (ret)
-                       nouveau_bo_ref(NULL, &disp->sync);
-       }
-
-       if (ret)
-               goto out;
-
-       /* hash table and dma objects for the memory areas we care about */
-       ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
-                                NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
-       if (ret)
-               goto out;
-
-       /* create evo dma channels */
-       for (i = 0; i < EVO_DMA_NR; i++) {
-               struct evo *evo = &disp->evo[i];
-               u64 offset = disp->sync->bo.offset;
-               u32 dmao = 0x1000 + (i * 0x100);
-               u32 hash = 0x0000 + (i * 0x040);
-
-               evo->idx = i;
-               evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
-               evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
-               if (!evo->ptr) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
-               nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
-               nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
-               nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
-               nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x00) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
-               nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
-               nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x20) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
-               nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
-               nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x40) << 9));
-
-               nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
-               nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
-               nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
-               nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
-               nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
-               nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
-                                               ((dmao + 0x60) << 9));
-       }
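/*
 * The nv_wo32() writes above appear to build classic ctxdma objects: a
 * class/flags word (0x49 or 0x09), base and limit addresses each shifted
 * right by 8, and a hash-table entry binding the handle (NvEvoSync,
 * NvEvoVRAM, NvEvoVRAM_LP, NvEvoFB32) to the object's offset and the
 * owning channel index (i << 27).
 */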
-
-       bar->flush(bar);
-
-out:
-       if (ret)
-               nvd0_display_destroy(dev);
-       return ret;
-}
index 24d932f..9175615 100644
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                /* use frac fb div on APUs */
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+               if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+                       radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
        } else {
                radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
 
index d5699fe..064023b 100644
@@ -34,8 +34,7 @@
 
 /* move these to drm_dp_helper.c/h */
 #define DP_LINK_CONFIGURATION_SIZE 9
-#define DP_LINK_STATUS_SIZE       6
-#define DP_DPCD_SIZE              8
+#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
 
 static char *voltage_names[] = {
         "0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 
 /***** general DP utility functions *****/
 
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
-{
-       return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
-                            int lane)
-{
-       int i = DP_LANE0_1_STATUS + (lane >> 1);
-       int s = (lane & 1) * 4;
-       u8 l = dp_link_status(link_status, i);
-       return (l >> s) & 0xf;
-}
-
-static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
-                                int lane_count)
-{
-       int lane;
-       u8 lane_status;
-
-       for (lane = 0; lane < lane_count; lane++) {
-               lane_status = dp_get_lane_status(link_status, lane);
-               if ((lane_status & DP_LANE_CR_DONE) == 0)
-                       return false;
-       }
-       return true;
-}
-
-static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
-                            int lane_count)
-{
-       u8 lane_align;
-       u8 lane_status;
-       int lane;
-
-       lane_align = dp_link_status(link_status,
-                                   DP_LANE_ALIGN_STATUS_UPDATED);
-       if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
-               return false;
-       for (lane = 0; lane < lane_count; lane++) {
-               lane_status = dp_get_lane_status(link_status, lane);
-               if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
-                       return false;
-       }
-       return true;
-}
-
-static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
-                                       int lane)
-
-{
-       int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
-       int s = ((lane & 1) ?
-                DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
-                DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-       u8 l = dp_link_status(link_status, i);
-
-       return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
-                                            int lane)
-{
-       int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
-       int s = ((lane & 1) ?
-                DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
-                DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-       u8 l = dp_link_status(link_status, i);
-
-       return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
 #define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
 #define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5
 
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
        int lane;
 
        for (lane = 0; lane < lane_count; lane++) {
-               u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
-               u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+               u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+               u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
                DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
                          lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
        return (link_rate * lane_num * 8) / bpp;
 }
 
-static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
-{
-       switch (dpcd[DP_MAX_LINK_RATE]) {
-       case DP_LINK_BW_1_62:
-       default:
-               return 162000;
-       case DP_LINK_BW_2_7:
-               return 270000;
-       case DP_LINK_BW_5_4:
-               return 540000;
-       }
-}
-
-static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
-{
-       return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static u8 dp_get_dp_link_rate_coded(int link_rate)
-{
-       switch (link_rate) {
-       case 162000:
-       default:
-               return DP_LINK_BW_1_62;
-       case 270000:
-               return DP_LINK_BW_2_7;
-       case 540000:
-               return DP_LINK_BW_5_4;
-       }
-}
-
 /***** radeon specific DP functions *****/
 
 /* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
                                        int pix_clock)
 {
        int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
-       int max_link_rate = dp_get_max_link_rate(dpcd);
-       int max_lane_num = dp_get_max_lane_number(dpcd);
+       int max_link_rate = drm_dp_max_link_rate(dpcd);
+       int max_lane_num = drm_dp_max_lane_count(dpcd);
        int lane_num;
        int max_dp_pix_clock;
 
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
                        return 540000;
        }
 
-       return dp_get_max_link_rate(dpcd);
+       return drm_dp_max_link_rate(dpcd);
 }
 
 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
 bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
-       u8 msg[25];
+       u8 msg[DP_DPCD_SIZE];
        int ret, i;
 
-       ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
+       ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
+                                       DP_DPCD_SIZE, 0);
        if (ret > 0) {
-               memcpy(dig_connector->dpcd, msg, 8);
+               memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
                DRM_DEBUG_KMS("DPCD: ");
-               for (i = 0; i < 8; i++)
+               for (i = 0; i < DP_DPCD_SIZE; i++)
                        DRM_DEBUG_KMS("%02x ", msg[i]);
                DRM_DEBUG_KMS("\n");
 
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
 
        if (!radeon_dp_get_link_status(radeon_connector, link_status))
                return false;
-       if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+       if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
                return false;
        return true;
 }
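
drm_dp_channel_eq_ok(), adopted above, is expected to perform the same DPCD lane-status test as the local dp_channel_eq_ok() removed earlier in this file. A sketch of that semantics, reconstructed from the removed lines:

/* Sketch mirroring the removed dp_channel_eq_ok(): training holds when
 * the interlane-align bit is set and every active lane reports all of
 * DP_CHANNEL_EQ_BITS. st[] is indexed relative to DP_LANE0_1_STATUS. */
static bool sketch_channel_eq_ok(const u8 st[DP_LINK_STATUS_SIZE], int lanes)
{
        int lane;
        u8 s;

        if (!(st[DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS] &
              DP_INTERLANE_ALIGN_DONE))
                return false;
        for (lane = 0; lane < lanes; lane++) {
                s = (st[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
                if ((s & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
                        return false;
        }
        return true;
}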
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
        int enc_id;
        int dp_clock;
        int dp_lane_count;
-       int rd_interval;
        bool tp3_supported;
-       u8 dpcd[8];
+       u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 train_set[4];
        u8 link_status[DP_LINK_STATUS_SIZE];
        u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
        radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
        /* set the link rate on the sink */
-       tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
+       tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
        radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
 
        /* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
        dp_info->tries = 0;
        voltage = 0xff;
        while (1) {
-               if (dp_info->rd_interval == 0)
-                       udelay(100);
-               else
-                       mdelay(dp_info->rd_interval * 4);
+               drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
 
                if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
                        DRM_ERROR("displayport link status failed\n");
                        break;
                }
 
-               if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+               if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
                        clock_recovery = true;
                        break;
                }
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
        dp_info->tries = 0;
        channel_eq = false;
        while (1) {
-               if (dp_info->rd_interval == 0)
-                       udelay(400);
-               else
-                       mdelay(dp_info->rd_interval * 4);
+               drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
 
                if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
                        DRM_ERROR("displayport link status failed\n");
                        break;
                }
 
-               if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+               if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
                        channel_eq = true;
                        break;
                }
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
        else
                dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
 
-       dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
        tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
        if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
                dp_info.tp3_supported = true;
        else
                dp_info.tp3_supported = false;
 
-       memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
+       memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
        dp_info.rdev = rdev;
        dp_info.encoder = encoder;
        dp_info.connector = connector;
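
Taken together, the drm_dp_* helpers adopted in this file replace the local copies removed above. Note the cached dpcd[] grows from 8 bytes to DP_RECEIVER_CAP_SIZE, evidently so the delay helpers can read DP_TRAINING_AUX_RD_INTERVAL (offset 0xe) instead of the dropped rd_interval field. Expected behaviour, reconstructed from the removed code (sketch names are illustrative):

/* DPCD capability decode, per the removed dp_get_max_*() helpers. */
static u8 sketch_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}

static int sketch_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        switch (dpcd[DP_MAX_LINK_RATE]) {
        case DP_LINK_BW_5_4:    return 540000;
        case DP_LINK_BW_2_7:    return 270000;
        case DP_LINK_BW_1_62:
        default:                return 162000;
        }
}

/* Training delay, per the open-coded rd_interval handling removed from
 * radeon_dp_link_train_cr()/_ce(): 100us (clock recovery) or 400us
 * (channel eq) when the sink reports interval 0, else interval * 4ms. */
static void sketch_train_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE], bool cr)
{
        if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
                udelay(cr ? 100 : 400);
        else
                mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}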
index 010bae1..4552d4a 100644
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
            ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
             (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-               radeon_dp_set_link_config(connector, mode);
+               radeon_dp_set_link_config(connector, adjusted_mode);
        }
 
        return true;
index 5d1d21a..f95d7fc 100644
@@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        case CHIP_SUMO:
                rdev->config.evergreen.num_ses = 1;
                rdev->config.evergreen.max_pipes = 4;
-               rdev->config.evergreen.max_tile_pipes = 2;
+               rdev->config.evergreen.max_tile_pipes = 4;
                if (rdev->pdev->device == 0x9648)
                        rdev->config.evergreen.max_simds = 3;
                else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+               gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_SUMO2:
                rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
                rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
                rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-               gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+               gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_BARTS:
                rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                break;
        case CHIP_CAICOS:
                rdev->config.evergreen.num_ses = 1;
-               rdev->config.evergreen.max_pipes = 4;
+               rdev->config.evergreen.max_pipes = 2;
                rdev->config.evergreen.max_tile_pipes = 2;
                rdev->config.evergreen.max_simds = 2;
                rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
        tmp = gb_addr_config & NUM_PIPES_MASK;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2403,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
                                         CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
                cayman_cp_int_cntl_setup(rdev, 1, 0);
                cayman_cp_int_cntl_setup(rdev, 2, 0);
+               tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+               WREG32(CAYMAN_DMA1_CNTL, tmp);
        } else
                WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
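
The disable path above masks the new DMA trap sources with a read-modify-write on each engine's control register. The same pattern, condensed (register names from the hunk):

/* Sketch: mask DMA trap interrupts on both DMA engines. */
static void sketch_dma_trap_mask(struct radeon_device *rdev)
{
        WREG32(DMA_CNTL, RREG32(DMA_CNTL) & ~TRAP_ENABLE);
        if (rdev->family >= CHIP_CAYMAN)
                WREG32(CAYMAN_DMA1_CNTL,
                       RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE);
}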
@@ -2457,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
        u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+       u32 dma_cntl, dma_cntl1 = 0;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
        afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
        afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
+       dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
        if (rdev->family >= CHIP_CAYMAN) {
                /* enable CP interrupts on all rings */
                if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
                }
        }
 
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("r600_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
+       if (rdev->family >= CHIP_CAYMAN) {
+               dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+               if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+                       DRM_DEBUG("r600_irq_set: sw int dma1\n");
+                       dma_cntl1 |= TRAP_ENABLE;
+               }
+       }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
                cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
        } else
                WREG32(CP_INT_CNTL, cp_int_cntl);
+
+       WREG32(DMA_CNTL, dma_cntl);
+
+       if (rdev->family >= CHIP_CAYMAN)
+               WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3120,16 @@ restart_ih:
                                break;
                        }
                        break;
+               case 146:
+               case 147:
+                       dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
                case 178: /* CP_INT in IB2 */
@@ -3116,9 +3153,19 @@ restart_ih:
                        } else
                                radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
+               case 244: /* DMA trap event */
+                       if (rdev->family >= CHIP_CAYMAN) {
+                               DRM_DEBUG("IH: DMA1 trap\n");
+                               radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+                       }
+                       break;
                default:
                        DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
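
For quick reference, the interrupt sources the hunks above begin handling (numeric values from the diff; the symbolic names below are illustrative, not kernel defines):

/* Sketch: IH src_id decode added by this patch. */
enum sketch_evergreen_ih_src {
        SKETCH_IH_VM_FAULT_A = 146,     /* VM_CONTEXT1 protection fault */
        SKETCH_IH_VM_FAULT_B = 147,     /* VM_CONTEXT1 protection fault */
        SKETCH_IH_DMA_TRAP   = 224,     /* DMA trap -> r600 DMA ring fence */
        SKETCH_IH_DMA1_TRAP  = 244,     /* cayman+ second DMA engine */
};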
@@ -3144,6 +3191,143 @@ restart_ih:
        return IRQ_HANDLED;
 }
 
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the
+ * fence seq number, plus a DMA trap packet to raise
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+                                  struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+       /* write the fence */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       radeon_ring_write(ring, fence->seq);
+       /* generate an interrupt */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+       /* flush HDP */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
+       radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+                                  struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+                      uint64_t src_offset, uint64_t dst_offset,
+                      unsigned num_gpu_pages,
+                      struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_dw, cur_size_in_dw;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_dw = size_in_dw;
+               if (cur_size_in_dw > 0xFFFFF)
+                       cur_size_in_dw = 0xFFFFF;
+               size_in_dw -= cur_size_in_dw;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, src_offset & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_dw * 4;
+               dst_offset += cur_size_in_dw * 4;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
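
A sizing note on evergreen_copy_dma() above: each DMA_PACKET_COPY is 5 dwords and moves at most 0xfffff dwords of payload, and the extra 11 dwords cover the semaphore sync plus the fence/trap emission. The reservation math, sketched with only values from the function:

/* Sketch: ring dwords reserved by evergreen_copy_dma(). */
static unsigned sketch_copy_dma_ring_dw(unsigned num_gpu_pages)
{
        unsigned size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);

        return num_loops * 5 + 11;      /* 5 dw per copy packet */
}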
@@ -3207,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -3221,12 +3411,23 @@ static int evergreen_startup(struct radeon_device *rdev)
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = evergreen_cp_load_microcode(rdev);
        if (r)
                return r;
        r = evergreen_cp_resume(rdev);
        if (r)
                return r;
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
 
        r = radeon_ib_pool_init(rdev);
        if (r) {
@@ -3273,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
        r600_audio_fini(rdev);
        r700_cp_stop(rdev);
-       ring->ready = false;
+       r600_dma_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3366,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -3393,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
index c042e49..74c6b42 100644
@@ -34,6 +34,8 @@
 #define MAX(a,b)                   (((a)>(b))?(a):(b))
 #define MIN(a,b)                   (((a)<(b))?(a):(b))
 
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+                          struct radeon_cs_reloc **cs_reloc);
 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc);
 
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                /* height is npipes htiles aligned == npipes * 8 pixel aligned */
                nby = round_up(nby, track->npipes * 8);
        } else {
+               /* always assume 8x8 htile */
+               /* alignment is htile alignment * 8; htile alignment varies
+                * with the number of pipes, the tile width, and nby
+                */
                switch (track->npipes) {
                case 8:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 64 * 8);
                        nby = round_up(nby, 64 * 8);
                        break;
                case 4:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 64 * 8);
                        nby = round_up(nby, 32 * 8);
                        break;
                case 2:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 32 * 8);
                        nby = round_up(nby, 32 * 8);
                        break;
                case 1:
+                       /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
                        nbx = round_up(nbx, 32 * 8);
                        nby = round_up(nby, 16 * 8);
                        break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
                }
        }
        /* compute number of htile */
-       nbx = nbx / 8;
-       nby = nby / 8;
-       size = nbx * nby * 4;
+       nbx = nbx >> 3;
+       nby = nby >> 3;
+       /* size must be aligned on npipes * 2K boundary */
+       size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
        size += track->htile_offset;
 
        if (size > radeon_bo_size(track->htile_bo)) {
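
The htile sizing above amounts to one 4-byte htile per 8x8 pixel block, with the total rounded up to an npipes * 2K boundary before the surface offset is added. As a standalone computation:

/* Sketch of the htile size calculation introduced above. */
static unsigned long sketch_htile_size(u32 nbx, u32 nby, u32 npipes)
{
        nbx >>= 3;      /* pixel columns -> 8x8 htile blocks */
        nby >>= 3;      /* pixel rows    -> 8x8 htile blocks */
        return roundup(nbx * nby * 4, npipes * (2 << 10));
}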
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case DB_HTILE_SURFACE:
                /* 8x8 only */
                track->htile_surface = radeon_get_ib_value(p, idx);
+               /* force 8x8 htile width and height */
+               ib[idx] |= 3;
                track->db_dirty = true;
                break;
        case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_CP_DMA:
+       {
+               u32 command, size, info;
+               u64 offset, tmp;
+               if (pkt->count != 4) {
+                       DRM_ERROR("bad CP DMA\n");
+                       return -EINVAL;
+               }
+               command = radeon_get_ib_value(p, idx+4);
+               size = command & 0x1fffff;
+               info = radeon_get_ib_value(p, idx+1);
+               if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+                   (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+                   ((((info & 0x00300000) >> 20) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+                   ((((info & 0x60000000) >> 29) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+                       /* non mem-to-mem copies require a dw-aligned count */
+                       if (size % 4) {
+                               DRM_ERROR("CP DMA command requires dw count alignment\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       /* GDS is ok */
+                       if (((info & 0x60000000) >> 29) != 1) {
+                               DRM_ERROR("CP DMA SAS not supported\n");
+                               return -EINVAL;
+                       }
+               } else {
+                       if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                               DRM_ERROR("CP DMA SAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       /* src address space is memory */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               r = evergreen_cs_packet_next_reloc(p, &reloc);
+                               if (r) {
+                                       DRM_ERROR("bad CP DMA SRC\n");
+                                       return -EINVAL;
+                               }
+
+                               tmp = radeon_get_ib_value(p, idx) +
+                                       ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+                               offset = reloc->lobj.gpu_offset + tmp;
+
+                               if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                                       dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+                                                tmp + size, radeon_bo_size(reloc->robj));
+                                       return -EINVAL;
+                               }
+
+                               ib[idx] = offset;
+                               ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+                       } else if (((info & 0x60000000) >> 29) != 2) {
+                               DRM_ERROR("bad CP DMA SRC_SEL\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       /* GDS is ok */
+                       if (((info & 0x00300000) >> 20) != 1) {
+                               DRM_ERROR("CP DMA DAS not supported\n");
+                               return -EINVAL;
+                       }
+               } else {
+                       /* dst address space is memory */
+                       if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                               DRM_ERROR("CP DMA DAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               r = evergreen_cs_packet_next_reloc(p, &reloc);
+                               if (r) {
+                                       DRM_ERROR("bad CP DMA DST\n");
+                                       return -EINVAL;
+                               }
+
+                               tmp = radeon_get_ib_value(p, idx+2) +
+                                       ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+                               offset = reloc->lobj.gpu_offset + tmp;
+
+                               if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                                       dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+                                                tmp + size, radeon_bo_size(reloc->robj));
+                                       return -EINVAL;
+                               }
+
+                               ib[idx+2] = offset;
+                               ib[idx+3] = upper_32_bits(offset) & 0xff;
+                       } else {
+                               DRM_ERROR("bad CP DMA DST_SEL\n");
+                               return -EINVAL;
+                       }
+               }
+               break;
+       }
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
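
The PACKET3_CP_DMA checks above key off two selector fields in the command's info dword; per the masks and comments in the hunk, src-sel 0 is memory, 1 is GDS, 2 is inline DATA, and dst-sel 0 is memory, 1 is GDS. The decode, sketched (macro names illustrative):

/* Sketch: CP_DMA address-space selectors tested above. */
#define SKETCH_CP_DMA_SRC_SEL(info)    (((info) >> 29) & 0x3) /* 0=mem 1=GDS 2=DATA */
#define SKETCH_CP_DMA_DST_SEL(info)    (((info) >> 20) & 0x3) /* 0=mem 1=GDS */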
@@ -2715,6 +2829,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
        return 0;
 }
 
+/*
+ *  DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
+
+/**
+ * evergreen_dma_cs_parse() - parse the DMA IB
+ * @p:         parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors. (Evergreen-Cayman)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+       u32 header, cmd, count, tiled, new_cmd, misc;
+       volatile u32 *ib = p->ib.ptr;
+       u32 idx, idx_value;
+       u64 src_offset, dst_offset, dst2_offset;
+       int r;
+
+       do {
+               if (p->idx >= ib_chunk->length_dw) {
+                       DRM_ERROR("Cannot parse packet at %d after CS end %d!\n",
+                                 p->idx, ib_chunk->length_dw);
+                       return -EINVAL;
+               }
+               idx = p->idx;
+               header = radeon_get_ib_value(p, idx);
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+               new_cmd = GET_DMA_NEW(header);
+               misc = GET_DMA_MISC(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_WRITE\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               dst_offset = ib[idx+1];
+                               dst_offset <<= 8;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               p->idx += count + 7;
+                       } else {
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += count + 3;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+                                        dst_offset, radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_COPY:
+                       r = r600_dma_cs_next_reloc(p, &src_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               idx_value = radeon_get_ib_value(p, idx + 2);
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2T, frame to fields */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       case 1:
+                                               /* L2T, T2L partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("L2T, T2L Partial is cayman only!\n");
+                                                       return -EINVAL;
+                                               }
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               p->idx += 12;
+                                               break;
+                                       case 3:
+                                               /* L2T, broadcast */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       case 4:
+                                               /* L2T, T2L */
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       src_offset = ib[idx+1];
+                                                       src_offset <<= 8;
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       dst_offset = ib[idx+7];
+                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       src_offset = ib[idx+7];
+                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       dst_offset = ib[idx+1];
+                                                       dst_offset <<= 8;
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               p->idx += 9;
+                                               break;
+                                       case 5:
+                                               /* T2T partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("T2T Partial is cayman only!\n");
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               p->idx += 13;
+                                               break;
+                                       case 7:
+                                               /* L2T, broadcast */
+                                               if (idx_value & (1 << 31)) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset <<= 8;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset <<= 8;
+                                               src_offset = ib[idx+8];
+                                               src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+                                               ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 10;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       switch (misc) {
+                                       case 0:
+                                               /* detile bit */
+                                               if (idx_value & (1 << 31)) {
+                                                       /* tiled src, linear dst */
+                                                       src_offset = ib[idx+1];
+                                                       src_offset <<= 8;
+                                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                                       dst_offset = ib[idx+7];
+                                                       dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               } else {
+                                                       /* linear src, tiled dst */
+                                                       src_offset = ib[idx+7];
+                                                       src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+                                                       ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                                       ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                                       dst_offset = ib[idx+1];
+                                                       dst_offset <<= 8;
+                                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                                               }
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               p->idx += 9;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               }
+                       } else {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2L, byte */
+                                               src_offset = ib[idx+2];
+                                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                               dst_offset = ib[idx+1];
+                                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                               if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+                                                                src_offset + count, radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + count, radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 5;
+                                               break;
+                                       case 1:
+                                               /* L2L, partial */
+                                               if (p->family < CHIP_CAYMAN) {
+                                                       DRM_ERROR("L2L Partial is cayman only !\n");
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+                                               ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+                                               p->idx += 9;
+                                               break;
+                                       case 4:
+                                               /* L2L, dw, broadcast */
+                                               r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+                                               if (r) {
+                                                       DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+                                                       return -EINVAL;
+                                               }
+                                               dst_offset = ib[idx+1];
+                                               dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                               dst2_offset = ib[idx+2];
+                                               dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
+                                               src_offset = ib[idx+3];
+                                               src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                               if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+                                                                src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+                                                                dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+                                                       dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+                                                                dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+                                                       return -EINVAL;
+                                               }
+                                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                               ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+                                               ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                               p->idx += 7;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       /* L2L, dw */
+                                       src_offset = ib[idx+2];
+                                       src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                                       dst_offset = ib[idx+1];
+                                       dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+                                       if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                                               dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+                                                        src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                                               return -EINVAL;
+                                       }
+                                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                                               dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+                                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                                               return -EINVAL;
+                                       }
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                                       ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                                       p->idx += 5;
+                               }
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+                               return -EINVAL;
+                       }
+                       dst_offset = ib[idx+1];
+                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       p->idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       p->idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+       for (r = 0; r < p->ib.length_dw; r++) {
+               printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+               mdelay(1);
+       }
+#endif
+       return 0;
+}
+
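The checker above repeatedly rebuilds a 40-bit GPU offset from a low dword plus a high byte and compares the access extent against the buffer object's size. A minimal standalone sketch of that pattern (illustrative C, not driver code; names are hypothetical):

#include <stdint.h>
#include <stdbool.h>

/* Rebuild the 40-bit offset the way the checker does (low dword plus
 * bits [39:32] from the high dword), then bounds-check the access. */
static bool dma_range_fits(uint32_t lo, uint32_t hi, uint64_t count_dw,
                           uint64_t bo_size)
{
        uint64_t offset = (uint64_t)lo | ((uint64_t)(hi & 0xff) << 32);

        /* count is in dwords; buffer sizes are in bytes */
        return (offset + count_dw * 4) <= bo_size;
}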
 /* vm parser */
 static bool evergreen_vm_reg_valid(u32 reg)
 {
@@ -2843,6 +3406,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
        u32 idx = pkt->idx + 1;
        u32 idx_value = ib[idx];
        u32 start_reg, end_reg, reg, i;
+       u32 command, info;
 
        switch (pkt->opcode) {
        case PACKET3_NOP:
@@ -2917,6 +3481,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
                                return -EINVAL;
                }
                break;
+       case PACKET3_CP_DMA:
+               command = ib[idx + 4];
+               info = ib[idx + 1];
+               if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
+                   (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
+                   ((((info & 0x00300000) >> 20) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
+                   ((((info & 0x60000000) >> 29) == 0) &&
+                    (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
+                       /* non mem-to-mem copies require a dw-aligned count */
+                       if ((command & 0x1fffff) % 4) {
+                               DRM_ERROR("CP DMA command requires dw count alignment\n");
+                               return -EINVAL;
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               start_reg = idx_value << 2;
+                               if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                                       reg = start_reg;
+                                       if (!evergreen_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad SRC register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!evergreen_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad SRC register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               start_reg = ib[idx + 2];
+                               if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                                       reg = start_reg;
+                                       if (!evergreen_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad DST register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!evergreen_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad DST register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               break;
        default:
                return -EINVAL;
        }
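The SRC_SEL/DST_SEL tests above pick bit fields out of the CP_DMA info and command dwords; a sketch restating those masks as helpers (names are illustrative, masks taken from the checks and from the packet layout comment added to the header below):

#include <stdint.h>

static inline uint32_t cp_dma_src_sel(uint32_t info)    /* 0=addr, 1=GDS, 2=DATA */
{
        return (info & 0x60000000) >> 29;
}

static inline uint32_t cp_dma_dst_sel(uint32_t info)    /* 0=addr, 1=GDS */
{
        return (info & 0x00300000) >> 20;
}

static inline uint32_t cp_dma_byte_count(uint32_t command)
{
        return command & 0x1fffff;                      /* BYTE_COUNT [20:0] */
}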
@@ -2958,3 +3580,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 
        return ret;
 }
+
+/**
+ * evergreen_dma_ib_parse - parse the DMA IB for VM
+ * @rdev: radeon_device pointer
+ * @ib:        radeon_ib pointer
+ *
+ * Parses the DMA IB from the VM CS ioctl and
+ * checks for errors. (Cayman-SI)
+ * Returns 0 for success and an error on failure.
+ **/
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       u32 idx = 0;
+       u32 header, cmd, count, tiled, new_cmd, misc;
+
+       do {
+               header = ib->ptr[idx];
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+               new_cmd = GET_DMA_NEW(header);
+               misc = GET_DMA_MISC(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       if (tiled)
+                               idx += count + 7;
+                       else
+                               idx += count + 3;
+                       break;
+               case DMA_PACKET_COPY:
+                       if (tiled) {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2T, frame to fields */
+                                               idx += 10;
+                                               break;
+                                       case 1:
+                                               /* L2T, T2L partial */
+                                               idx += 12;
+                                               break;
+                                       case 3:
+                                               /* L2T, broadcast */
+                                               idx += 10;
+                                               break;
+                                       case 4:
+                                               /* L2T, T2L */
+                                               idx += 9;
+                                               break;
+                                       case 5:
+                                               /* T2T partial */
+                                               idx += 13;
+                                               break;
+                                       case 7:
+                                               /* L2T, broadcast */
+                                               idx += 10;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       switch (misc) {
+                                       case 0:
+                                               idx += 9;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               }
+                       } else {
+                               if (new_cmd) {
+                                       switch (misc) {
+                                       case 0:
+                                               /* L2L, byte */
+                                               idx += 5;
+                                               break;
+                                       case 1:
+                                               /* L2L, partial */
+                                               idx += 9;
+                                               break;
+                                       case 4:
+                                               /* L2L, dw, broadcast */
+                                               idx += 7;
+                                               break;
+                                       default:
+                                               DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       /* L2L, dw */
+                                       idx += 5;
+                               }
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (idx < ib->length_dw);
+
+       return 0;
+}
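The GET_DMA_* accessors used above are not part of this hunk; decode helpers for the fields that follow directly from the DMA_PACKET() encode macro added to the headers below look roughly like this (a sketch; GET_DMA_NEW and GET_DMA_MISC live in other header bits and are deliberately omitted rather than guessed):

#include <stdint.h>

static inline uint32_t get_dma_cmd(uint32_t header)   { return (header >> 28) & 0xf; }
static inline uint32_t get_dma_t(uint32_t header)     { return (header >> 23) & 0x1; }
static inline uint32_t get_dma_count(uint32_t header) { return header & 0xfffff; }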
index 2bc0f6a..cb9baaa 100644
@@ -45,6 +45,8 @@
 #define TURKS_GB_ADDR_CONFIG_GOLDEN          0x02010002
 #define CEDAR_GB_ADDR_CONFIG_GOLDEN          0x02010001
 #define CAICOS_GB_ADDR_CONFIG_GOLDEN         0x02010001
+#define SUMO_GB_ADDR_CONFIG_GOLDEN           0x02010002
+#define SUMO2_GB_ADDR_CONFIG_GOLDEN          0x02010002
 
 /* Registers */
 
 #       define AFMT_MPEG_INFO_UPDATE         (1 << 10)
 #define AFMT_GENERIC0_7                      0x7138
 
+/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x5f84 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x5f88 /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x5f8c /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x5f90 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x5f94 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x5f98 /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x5f9c /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x5fa0 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x5fa4 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x5fa8 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x5fac /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x5fb0 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x5fb4 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x5fb8 /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
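For example, combining the fields above, an audio descriptor advertising 8-channel LPCM at every listed rate would be built like this (illustrative value, not a driver default):

#include <stdint.h>

/* packing restated from the defines above */
#define MAX_CHANNELS(x)          (((x) & 0x7) << 0)
#define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)

/* 7 = eight channels (field is channels minus one); 0x7f sets the
 * seven rate bits, 32 kHz through 192 kHz */
static const uint32_t lpcm_descriptor =
        MAX_CHANNELS(7) | SUPPORTED_FREQUENCIES(0x7f);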
+#define AZ_HOT_PLUG_CONTROL                               0x5e78
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
 #define VM_CONTEXT1_CNTL                               0x1414
+#define VM_CONTEXT1_CNTL2                              0x1434
 #define        VM_CONTEXT0_PAGE_TABLE_BASE_ADDR                0x153C
 #define        VM_CONTEXT0_PAGE_TABLE_END_ADDR                 0x157C
 #define        VM_CONTEXT0_PAGE_TABLE_START_ADDR               0x155C
 #define                CACHE_UPDATE_MODE(x)                            ((x) << 6)
 #define        VM_L2_STATUS                                    0x140C
 #define                L2_BUSY                                         (1 << 0)
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x14FC
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x14DC
 
 #define        WAIT_UNTIL                                      0x8040
 
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* ASYNC DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                0xD0B8
+
+#define CAYMAN_DMA1_CNTL                                  0xd82c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
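As a usage sketch, the one-dword write emitted by the DMA ring test in r600.c later in this patch encodes its header with these macros (restated here to keep the example standalone):

#include <stdint.h>

#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |       \
                                  (((t) & 0x1) << 23) |         \
                                  (((s) & 0x1) << 22) |         \
                                  (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_WRITE 0x2

/* tiling=0, swap=0, count=1 dword; the ring then carries
 * dst_addr[31:0], dst_addr[39:32], and the data dword */
static const uint32_t write_header = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);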
 /* PCIE link stuff */
 #define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
 #define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
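Putting the six-dword layout above together, the body of a plain mem-to-mem CP_DMA (SRC_SEL=0, DST_SEL=0, ENGINE=ME, COMMAND flags clear) can be sketched as follows; the header dword is omitted since the PACKET3() macro is not part of this hunk, and the addresses and count are illustrative:

#include <stdint.h>

static void cp_dma_body(uint32_t out[5], uint64_t src, uint64_t dst,
                        uint32_t byte_count)
{
        out[0] = (uint32_t)src;                 /* 2. SRC_ADDR_LO            */
        out[1] = (uint32_t)(src >> 32) & 0xff;  /* 3. SRC_ADDR_HI, sels = 0  */
        out[2] = (uint32_t)dst;                 /* 4. DST_ADDR_LO            */
        out[3] = (uint32_t)(dst >> 32) & 0xff;  /* 5. DST_ADDR_HI            */
        out[4] = byte_count & 0x1fffff;         /* 6. COMMAND=0 | BYTE_COUNT */
}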
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
index cda01f8..7bdbcb0 100644
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
        tmp = gb_addr_config & NUM_PIPES_MASK;
        tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
        /* enable context1-7 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
-       WREG32(VM_CONTEXT1_CNTL2, 0);
-       WREG32(VM_CONTEXT1_CNTL, 0);
+       WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        cayman_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
                WREG32(SCRATCH_UMSK, 0);
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
        }
 }
 
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
        return 0;
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+                               struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
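The padding loop above targets wptr % 8 == 5 because the INDIRECT_BUFFER packet is three dwords long: started there, it fills ring slots 5, 6 and 7 and ends exactly on the required 8-dword boundary. A one-line check of that arithmetic:

#include <assert.h>

int main(void)
{
        unsigned wptr = 13;             /* any value with wptr % 8 == 5 */
        assert(((wptr + 3) & 7) == 0);  /* 3-dword IB packet ends 8-DW aligned */
        return 0;
}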
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+       u32 rb_cntl;
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+       /* dma0 */
+       rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+       /* dma1 */
+       rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+       rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring;
+       u32 rb_cntl, dma_cntl;
+       u32 rb_bufsz;
+       u32 reg_offset, wb_offset;
+       int i, r;
+
+       /* Reset dma */
+       WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+       RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+
+       for (i = 0; i < 2; i++) {
+               if (i == 0) {
+                       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+                       reg_offset = DMA0_REGISTER_OFFSET;
+                       wb_offset = R600_WB_DMA_RPTR_OFFSET;
+               } else {
+                       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+                       reg_offset = DMA1_REGISTER_OFFSET;
+                       wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+               }
+
+               WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+               WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+               /* Set ring buffer size in dwords */
+               rb_bufsz = drm_order(ring->ring_size / 4);
+               rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+               rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+               WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+               /* Initialize the ring buffer's read and write pointers */
+               WREG32(DMA_RB_RPTR + reg_offset, 0);
+               WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+               /* set the wb address whether it's enabled or not */
+               WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+                      upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+               WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+                      ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+               if (rdev->wb.enabled)
+                       rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+               WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+               /* enable DMA IBs */
+               WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
+
+               dma_cntl = RREG32(DMA_CNTL + reg_offset);
+               dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+               WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+               ring->wptr = 0;
+               WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+               ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+               WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+               ring->ready = true;
+
+               r = radeon_ring_test(rdev, ring->idx, ring);
+               if (r) {
+                       ring->ready = false;
+                       return r;
+               }
+       }
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+       return 0;
+}
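The rb_bufsz computation above feeds the log2-encoded size field of DMA_RB_CNTL (see DMA_RB_SIZE in the header below). For the 64 KiB DMA rings allocated in cayman_init, that works out as follows (a sketch):

#include <stdint.h>

#define DMA_RB_SIZE(x) ((x) << 1)       /* log2, restated from the header */

/* 64 KiB ring = 64 * 1024 / 4 = 16384 dwords; log2(16384) = 14,
 * which is what drm_order() returns for this size */
static const uint32_t rb_cntl_size_bits = DMA_RB_SIZE(14);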
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+       cayman_dma_stop(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+       radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 {
        struct evergreen_mc_save save;
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
        return cayman_gpu_soft_reset(rdev);
 }
 
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (cayman-SI).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 dma_status_reg;
+
+       if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+               dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+       else
+               dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+       if (dma_status_reg & DMA_IDLE) {
+               radeon_ring_lockup_update(ring);
+               return false;
+       }
+       /* force ring activities */
+       radeon_ring_force_activity(rdev, ring);
+       return radeon_ring_test_lockup(rdev, ring);
+}
+
 static int cayman_startup(struct radeon_device *rdev)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = cayman_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = cayman_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        cayman_cp_enable(rdev, false);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       cayman_dma_stop(rdev);
        evergreen_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                cayman_cp_fini(rdev);
+               cayman_dma_fini(rdev);
                r600_irq_fini(rdev);
                if (rdev->flags & RADEON_IS_IGP)
                        si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
 {
        r600_blit_fini(rdev);
        cayman_cp_fini(rdev);
+       cayman_dma_fini(rdev);
        r600_irq_fini(rdev);
        if (rdev->flags & RADEON_IS_IGP)
                si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
-       while (count) {
-               unsigned ndw = 1 + count * 2;
-               if (ndw > 0x3FFF)
-                       ndw = 0x3FFF;
-
-               radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
-               radeon_ring_write(ring, pe);
-               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-               for (; ndw > 1; ndw -= 2, --count, pe += 8) {
-                       uint64_t value = 0;
-                       if (flags & RADEON_VM_PAGE_SYSTEM) {
-                               value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
+       uint64_t value;
+       unsigned ndw;
+
+       if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+               while (count) {
+                       ndw = 1 + count * 2;
+                       if (ndw > 0x3FFF)
+                               ndw = 0x3FFF;
+
+                       radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                       for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
                                addr += incr;
-
-                       } else if (flags & RADEON_VM_PAGE_VALID) {
-                               value = addr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
+                       }
+               }
+       } else {
+               while (count) {
+                       ndw = count * 2;
+                       if (ndw > 0xFFFFE)
+                               ndw = 0xFFFFE;
+
+                       /* for non-physically contiguous pages (system) */
+                       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                       for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
                                addr += incr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
                        }
-
-                       value |= r600_flags;
-                       radeon_ring_write(ring, value);
-                       radeon_ring_write(ring, upper_32_bits(value));
                }
        }
 }
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);
 }
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+
+       if (vm == NULL)
+               return;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+       /* flush hdp cache */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+       radeon_ring_write(ring, 1);
+
+       /* bits 0-7 are the VM contexts0-7 */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+       radeon_ring_write(ring, 1 << vm->id);
+}
+
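A note on the SRBM_WRITE payload built above: reading the 0xf in bits [19:16] as per-byte write enables is an assumption from the constant; reg >> 2 is the register's dword address. A sketch of the dword:

#include <stdint.h>

static inline uint32_t srbm_write_dw1(uint32_t reg)
{
        /* [19:16] byte enables (assumed), [15:0] register dword address */
        return (0xfu << 16) | (reg >> 2);
}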
index cbef681..b93186b 100644
 #define                VMID(x)                                         (((x) & 0x7) << 0)
 #define        SRBM_STATUS                                     0x0E50
 
+#define        SRBM_SOFT_RESET                                 0x0E60
+#define                SOFT_RESET_BIF                          (1 << 1)
+#define                SOFT_RESET_CG                           (1 << 2)
+#define                SOFT_RESET_DC                           (1 << 5)
+#define                SOFT_RESET_DMA1                         (1 << 6)
+#define                SOFT_RESET_GRBM                         (1 << 8)
+#define                SOFT_RESET_HDP                          (1 << 9)
+#define                SOFT_RESET_IH                           (1 << 10)
+#define                SOFT_RESET_MC                           (1 << 11)
+#define                SOFT_RESET_RLC                          (1 << 13)
+#define                SOFT_RESET_ROM                          (1 << 14)
+#define                SOFT_RESET_SEM                          (1 << 15)
+#define                SOFT_RESET_VMC                          (1 << 17)
+#define                SOFT_RESET_DMA                          (1 << 20)
+#define                SOFT_RESET_TST                          (1 << 21)
+#define                SOFT_RESET_REGBB                        (1 << 22)
+#define                SOFT_RESET_ORB                          (1 << 23)
+
 #define VM_CONTEXT0_REQUEST_RESPONSE                   0x1470
 #define                REQUEST_TYPE(x)                                 (((x) & 0xf) << 0)
 #define                RESPONSE_TYPE_MASK                              0x000000F0
 #define VM_CONTEXT0_CNTL                               0x1410
 #define                ENABLE_CONTEXT                                  (1 << 0)
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
 #define VM_CONTEXT1_CNTL                               0x1414
 #define VM_CONTEXT0_CNTL2                              0x1430
 #define VM_CONTEXT1_CNTL2                              0x1434
 #define        PACKET3_SET_APPEND_CNT                          0x75
 #define        PACKET3_ME_WRITE                                0x7A
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#       define CMD_VMID_FORCE                             (1 << 31)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_TILING_CONFIG                                0xd0b8
+#define DMA_MODE                                          0xd0bc
+
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
 #endif
 
index 376884f..8ff7cac 100644
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
        return 0;
 }
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+                     bool always_indirect)
 {
-       if (reg < rdev->rmmio_size)
+       if (reg < rdev->rmmio_size && !always_indirect)
                return readl(((void __iomem *)rdev->rmmio) + reg);
        else {
+               unsigned long flags;
+               uint32_t ret;
+
+               spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-               return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+               return ret;
        }
 }
 
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+                 bool always_indirect)
 {
-       if (reg < rdev->rmmio_size)
+       if (reg < rdev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)rdev->rmmio) + reg);
        else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
                writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+               spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
        }
 }
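The new mmio_idx_lock exists because the INDEX/DATA pair is not an atomic access; an illustration of the race it closes (not driver code):

/* Without the lock, this interleaving reads the wrong register:
 *
 *   CPU0: writel(REG_A, MM_INDEX)
 *   CPU1: writel(REG_B, MM_INDEX)
 *   CPU0: readl(MM_DATA)         <- returns REG_B's value, not REG_A's
 *
 * Holding the spinlock with IRQs disabled across both steps makes each
 * indirect access atomic with respect to other CPUs and local IRQs. */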
 
index cda280d..2aaf147 100644
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
        return radeon_ring_test_lockup(rdev, ring);
 }
 
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       u32 dma_status_reg;
+
+       dma_status_reg = RREG32(DMA_STATUS_REG);
+       if (dma_status_reg & DMA_IDLE) {
+               radeon_ring_lockup_update(ring);
+               return false;
+       }
+       /* force ring activities */
+       radeon_ring_force_activity(rdev, ring);
+       return radeon_ring_test_lockup(rdev, ring);
+}
+
 int r600_asic_reset(struct radeon_device *rdev)
 {
        return r600_gpu_soft_reset(rdev);
@@ -1424,13 +1447,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
 
 int r600_count_pipe_bits(uint32_t val)
 {
-       int i, ret = 0;
-
-       for (i = 0; i < 32; i++) {
-               ret += val & 1;
-               val >>= 1;
-       }
-       return ret;
+       return hweight32(val);
 }
 
 static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
        WREG32(GB_TILING_CONFIG, tiling_config);
        WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
        WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
+       WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
 
        tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
        WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
        WREG32(SCRATCH_UMSK, 0);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
        radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine.  The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things.  It also
+ * has support for tiling/detiling of buffers.
+ */
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+       u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+       rb_cntl &= ~DMA_RB_ENABLE;
+       WREG32(DMA_RB_CNTL, rb_cntl);
+
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+       struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       u32 rb_cntl, dma_cntl;
+       u32 rb_bufsz;
+       int r;
+
+       /* Reset dma */
+       if (rdev->family >= CHIP_RV770)
+               WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+       else
+               WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+       RREG32(SRBM_SOFT_RESET);
+       udelay(50);
+       WREG32(SRBM_SOFT_RESET, 0);
+
+       WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+       WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+       /* Set ring buffer size in dwords */
+       rb_bufsz = drm_order(ring->ring_size / 4);
+       rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+       rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+       WREG32(DMA_RB_CNTL, rb_cntl);
+
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32(DMA_RB_RPTR, 0);
+       WREG32(DMA_RB_WPTR, 0);
+
+       /* set the wb address whether it's enabled or not */
+       WREG32(DMA_RB_RPTR_ADDR_HI,
+              upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+       WREG32(DMA_RB_RPTR_ADDR_LO,
+              ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+       if (rdev->wb.enabled)
+               rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+       WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+       /* enable DMA IBs */
+       WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
+
+       dma_cntl = RREG32(DMA_CNTL);
+       dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+       WREG32(DMA_CNTL, dma_cntl);
+
+       if (rdev->family >= CHIP_RV770)
+               WREG32(DMA_MODE, 1);
+
+       ring->wptr = 0;
+       WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+       ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+       WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+       ring->ready = true;
+
+       r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+       if (r) {
+               ring->ready = false;
+               return r;
+       }
+
+       radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+       return 0;
+}
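
One detail worth calling out in the resume path: DMA_RB_CNTL takes the ring size as a log2 dword count (the DMA_RB_SIZE field, see the register header below), so the 64 KB ring allocated in r600_init() further down encodes as 14. A small sketch of the arithmetic, with drm_order() reimplemented as the usual ceil-log2 helper:

    #include <stdio.h>
    #include <stdint.h>

    /* ceil(log2(size)): the contract of the kernel's drm_order() */
    static unsigned order(uint32_t size)
    {
        unsigned o = 0;
        while ((1u << o) < size)
            o++;
        return o;
    }

    int main(void)
    {
        uint32_t ring_bytes = 64 * 1024;            /* ring size from r600_init() */
        uint32_t rb_bufsz = order(ring_bytes / 4);  /* size in dwords, log2 */
        uint32_t rb_cntl = rb_bufsz << 1;           /* DMA_RB_SIZE lives at bit 1 */
        printf("rb_bufsz=%u rb_cntl=0x%x\n", rb_bufsz, rb_cntl);  /* 14, 0x1c */
        return 0;
    }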
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+       r600_dma_stop(rdev);
+       radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
 
 /*
  * GPU scratch registers helpers function.
@@ -2252,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        return r;
 }
 
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+                      struct radeon_ring *ring)
+{
+       unsigned i;
+       int r;
+       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+       u32 tmp;
+
+       if (!ptr) {
+               DRM_ERROR("invalid vram scratch pointer\n");
+               return -EINVAL;
+       }
+
+       tmp = 0xCAFEDEAD;
+       writel(tmp, ptr);
+
+       r = radeon_ring_lock(rdev, ring, 4);
+       if (r) {
+               DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+               return r;
+       }
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+       radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+       radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+       radeon_ring_write(ring, 0xDEADBEEF);
+       radeon_ring_unlock_commit(rdev, ring);
+
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = readl(ptr);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+       } else {
+               DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+                         ring->idx, tmp);
+               r = -EINVAL;
+       }
+       return r;
+}
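
The four dwords committed by the ring test form a complete one-dword DMA write: a header (command 0x2, count 1), the low 32 bits of the 40-bit destination address, the top 8 bits, and the payload. Assuming the DMA_PACKET() macro from the register header below, the header value can be verified by hand:

    #include <assert.h>
    #include <stdint.h>

    /* DMA_PACKET() and the write opcode as in the register header below */
    #define DMA_PACKET(cmd, t, s, n) ((((uint32_t)(cmd) & 0xF) << 28) | \
                                      (((t) & 0x1) << 23) |             \
                                      (((s) & 0x1) << 22) |             \
                                      (((n) & 0xFFFF) << 0))
    #define DMA_PACKET_WRITE 0x2

    int main(void)
    {
        uint64_t gpu_addr = 0x1234567890ull;  /* hypothetical 40-bit address */
        uint32_t pkt[4];

        pkt[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);  /* write 1 dword */
        pkt[1] = (uint32_t)gpu_addr & 0xfffffffc;        /* addr bits [31:2] */
        pkt[2] = (uint32_t)(gpu_addr >> 32) & 0xff;      /* addr bits [39:32] */
        pkt[3] = 0xDEADBEEF;                             /* payload */

        assert(pkt[0] == 0x20000001);
        return 0;
    }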
+
+/*
+ * CP fences/semaphores
+ */
+
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
 {
@@ -2315,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 }
 
+/*
+ * DMA fences/semaphores
+ */
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+                             struct radeon_fence *fence)
+{
+       struct radeon_ring *ring = &rdev->ring[fence->ring];
+       u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+       /* write the fence */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+       radeon_ring_write(ring, lower_32_bits(fence->seq));
+       /* generate an interrupt */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+                                 struct radeon_ring *ring,
+                                 struct radeon_semaphore *semaphore,
+                                 bool emit_wait)
+{
+       u64 addr = semaphore->gpu_addr;
+       u32 s = emit_wait ? 0 : 1;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+       radeon_ring_write(ring, addr & 0xfffffffc);
+       radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
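
The select bit has an inverted sense relative to the argument name: s is 1 to signal and 0 to wait, so one three-dword packet format serves both directions. A one-file check of the two header encodings (macro and opcode as in the register header below):

    #include <assert.h>
    #include <stdint.h>

    #define DMA_PACKET(cmd, t, s, n) ((((uint32_t)(cmd) & 0xF) << 28) | \
                                      (((t) & 0x1) << 23) |             \
                                      (((s) & 0x1) << 22) |             \
                                      (((n) & 0xFFFF) << 0))
    #define DMA_PACKET_SEMAPHORE 0x5

    int main(void)
    {
        /* s = emit_wait ? 0 : 1, i.e. bit 22 set means "signal" */
        assert(DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, 1, 0) == 0x50400000);
        assert(DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, 0, 0) == 0x50000000);
        return 0;
    }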
+
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
@@ -2334,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx-r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages,
+                 struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_dw, cur_size_in_dw;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+       num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_dw = size_in_dw;
+               if (cur_size_in_dw > 0xFFFF)
+                       cur_size_in_dw = 0xFFFF;
+               size_in_dw -= cur_size_in_dw;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+               radeon_ring_write(ring, dst_offset & 0xfffffffc);
+               radeon_ring_write(ring, src_offset & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_dw * 4;
+               dst_offset += cur_size_in_dw * 4;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
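
Each DMA_PACKET_COPY moves at most 0xffff dwords, so the copy is chunked and the ring reservation is five dwords per chunk plus a fixed tail for the semaphore sync and fence. Worked through for a hypothetical 1024-page transfer, assuming 4 KB GPU pages:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned num_gpu_pages = 1024;   /* hypothetical 4 MiB transfer */
        unsigned page_shift = 12;        /* assumes 4 KB GPU pages */
        uint32_t size_in_dw = (num_gpu_pages << page_shift) / 4;
        unsigned num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);

        /* 5 dwords per DMA_PACKET_COPY, plus 8 reserved for sync + fence */
        printf("dwords=%u loops=%u ring_space=%u\n",
               size_in_dw, num_loops, num_loops * 5 + 8);  /* 1048576 17 93 */
        return 0;
    }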
+
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 static int r600_startup(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_ring *ring;
        int r;
 
        /* enable pcie gen2 link */
@@ -2394,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -2403,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                             R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             0, 0xfffff, RADEON_CP_PACKET2);
+       if (r)
+               return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
        if (r)
                return r;
+
        r = r600_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -2416,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r600_cp_stop(rdev);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        r600_pcie_gart_disable(rdev);
@@ -2544,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -2556,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r600_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -2572,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
        r600_audio_fini(rdev);
        r600_blit_fini(rdev);
        r600_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
@@ -2674,6 +3023,104 @@ free_scratch:
        return r;
 }
 
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+       struct radeon_ib ib;
+       unsigned i;
+       int r;
+       void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+       u32 tmp = 0;
+
+       if (!ptr) {
+               DRM_ERROR("invalid vram scratch pointer\n");
+               return -EINVAL;
+       }
+
+       tmp = 0xCAFEDEAD;
+       writel(tmp, ptr);
+
+       r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+       if (r) {
+               DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+               return r;
+       }
+
+       ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+       ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+       ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+       ib.ptr[3] = 0xDEADBEEF;
+       ib.length_dw = 4;
+
+       r = radeon_ib_schedule(rdev, &ib, NULL);
+       if (r) {
+               radeon_ib_free(rdev, &ib);
+               DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+               return r;
+       }
+       r = radeon_fence_wait(ib.fence, false);
+       if (r) {
+               DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               return r;
+       }
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               tmp = readl(ptr);
+               if (tmp == 0xDEADBEEF)
+                       break;
+               DRM_UDELAY(1);
+       }
+       if (i < rdev->usec_timeout) {
+               DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+       } else {
+               DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+               r = -EINVAL;
+       }
+       radeon_ib_free(rdev, &ib);
+       return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+       struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+       if (rdev->wb.enabled) {
+               u32 next_rptr = ring->wptr + 4;
+               while ((next_rptr & 7) != 5)
+                       next_rptr++;
+               next_rptr += 3;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+               radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+               radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+               radeon_ring_write(ring, next_rptr);
+       }
+
+       /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+        * Pad as necessary with NOPs.
+        */
+       while ((ring->wptr & 7) != 5)
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+       radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+       radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
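
The NOP padding above guarantees the three-dword INDIRECT_BUFFER packet ends exactly on an 8-dword boundary: wptr is advanced until wptr & 7 == 5, so the packet's last dword lands at a multiple of 8. A standalone sketch of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* pad with NOPs until a 3-dword packet starting at wptr ends 8-DW aligned */
    static uint32_t pad_to_ib_slot(uint32_t wptr, unsigned *nops)
    {
        *nops = 0;
        while ((wptr & 7) != 5) {
            wptr++;        /* each NOP advances wptr by one dword */
            (*nops)++;
        }
        return wptr;
    }

    int main(void)
    {
        unsigned nops;
        uint32_t wptr = pad_to_ib_slot(2, &nops);
        assert(nops == 3 && wptr == 5);
        assert(((wptr + 3) & 7) == 0);   /* packet's last dword closes the group */
        return 0;
    }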
 /*
  * Interrupts
  *
@@ -2865,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
        u32 tmp;
 
        WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+       tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(DxMODE_INT_MASK, 0);
        WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
        u32 grbm_int_cntl = 0;
        u32 hdmi0, hdmi1;
        u32 d1grph = 0, d2grph = 0;
+       u32 dma_cntl;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
                hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
                hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
        }
+       dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
 
        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                DRM_DEBUG("r600_irq_set: sw int\n");
                cp_int_cntl |= RB_INT_ENABLE;
                cp_int_cntl |= TIME_STAMP_INT_ENABLE;
        }
+
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("r600_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
        }
 
        WREG32(CP_INT_CNTL, cp_int_cntl);
+       WREG32(DMA_CNTL, dma_cntl);
        WREG32(DxMODE_INT_MASK, mode_int);
        WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
        WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3927,10 @@ restart_ih:
                        DRM_DEBUG("IH: CP EOP\n");
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
index 2514123..be85f75 100644 (file)
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
 
 static int r600_count_pipe_bits(uint32_t val)
 {
-       int i, ret = 0;
-       for (i = 0; i < 32; i++) {
-               ret += val & 1;
-               val >>= 1;
-       }
-       return ret;
+       return hweight32(val);
 }
 
 static void r600_gfx_init(struct drm_device *dev,
index 211c402..0be768b 100644 (file)
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
                        /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
                        nby = round_up(nby, track->npipes * 8);
                } else {
-                       /* htile widht & nby (8 or 4) make 2 bits number */
-                       tmp = track->htile_surface & 3;
+                       /* always assume 8x8 htile */
                        /* align is htile align * 8, htile align vary according to
                         * number of pipe and tile width and nby
                         */
                        switch (track->npipes) {
                        case 8:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 64 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 64 * 8);
+                               nby = round_up(nby, 64 * 8);
                                break;
                        case 4:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 64 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 64 * 8);
+                               nby = round_up(nby, 32 * 8);
                                break;
                        case 2:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 32 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 32 * 8);
+                               nby = round_up(nby, 32 * 8);
                                break;
                        case 1:
-                               switch (tmp) {
-                               case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-                                       nbx = round_up(nbx, 32 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-                               case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 16 * 8);
-                                       break;
-                               case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-                                       nbx = round_up(nbx, 16 * 8);
-                                       nby = round_up(nby, 8 * 8);
-                                       break;
-                               default:
-                                       return -EINVAL;
-                               }
+                               /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+                               nbx = round_up(nbx, 32 * 8);
+                               nby = round_up(nby, 16 * 8);
                                break;
                        default:
                                dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
                        }
                }
                /* compute number of htile */
-               nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
-               nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
-               size = nbx * nby * 4;
+               nbx = nbx >> 3;
+               nby = nby >> 3;
+               /* size must be aligned on npipes * 2K boundary */
+               size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
                size += track->htile_offset;
 
                if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                break;
        case DB_HTILE_SURFACE:
                track->htile_surface = radeon_get_ib_value(p, idx);
+               /* force 8x8 htile width and height */
+               ib[idx] |= 3;
                track->db_dirty = true;
                break;
        case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
                        ib[idx+2] = upper_32_bits(offset) & 0xff;
                }
                break;
+       case PACKET3_CP_DMA:
+       {
+               u32 command, size;
+               u64 offset, tmp;
+               if (pkt->count != 4) {
+                       DRM_ERROR("bad CP DMA\n");
+                       return -EINVAL;
+               }
+               command = radeon_get_ib_value(p, idx+4);
+               size = command & 0x1fffff;
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       DRM_ERROR("CP DMA SAS not supported\n");
+                       return -EINVAL;
+               } else {
+                       if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                               DRM_ERROR("CP DMA SAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       /* src address space is memory */
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad CP DMA SRC\n");
+                               return -EINVAL;
+                       }
+
+                       tmp = radeon_get_ib_value(p, idx) +
+                               ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
+
+                       offset = reloc->lobj.gpu_offset + tmp;
+
+                       if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                               dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+                                        tmp + size, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+
+                       ib[idx] = offset;
+                       ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       DRM_ERROR("CP DMA DAS not supported\n");
+                       return -EINVAL;
+               } else {
+                       /* dst address space is memory */
+                       if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                               DRM_ERROR("CP DMA DAIC only supported for registers\n");
+                               return -EINVAL;
+                       }
+                       r = r600_cs_packet_next_reloc(p, &reloc);
+                       if (r) {
+                               DRM_ERROR("bad CP DMA DST\n");
+                               return -EINVAL;
+                       }
+
+                       tmp = radeon_get_ib_value(p, idx+2) +
+                               ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
+
+                       offset = reloc->lobj.gpu_offset + tmp;
+
+                       if ((tmp + size) > radeon_bo_size(reloc->robj)) {
+                               dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+                                        tmp + size, radeon_bo_size(reloc->robj));
+                               return -EINVAL;
+                       }
+
+                       ib[idx+2] = offset;
+                       ib[idx+3] = upper_32_bits(offset) & 0xff;
+               }
+               break;
+       }
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2496,3 +2514,196 @@ void r600_cs_legacy_init(void)
 {
        r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
 }
+
+/*
+ *  DMA
+ */
+/**
+ * r600_dma_cs_next_reloc() - parse next reloc
+ * @p:         parser structure holding parsing context.
+ * @cs_reloc:          reloc information
+ *
+ * Returns the next reloc, validates the BO and computes the
+ * GPU offset using the provided start.
+ **/
+int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
+                          struct radeon_cs_reloc **cs_reloc)
+{
+       struct radeon_cs_chunk *relocs_chunk;
+       unsigned idx;
+
+       if (p->chunk_relocs_idx == -1) {
+               DRM_ERROR("No relocation chunk !\n");
+               return -EINVAL;
+       }
+       *cs_reloc = NULL;
+       relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+       idx = p->dma_reloc_idx;
+       if (idx >= relocs_chunk->length_dw) {
+               DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+                         idx, relocs_chunk->length_dw);
+               return -EINVAL;
+       }
+       *cs_reloc = p->relocs_ptr[idx];
+       p->dma_reloc_idx++;
+       return 0;
+}
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
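
These accessors recover the command, dword count, and tiled flag from a packet header during parsing; decoding the write header built earlier (0x20000001) round-trips as expected:

    #include <assert.h>
    #include <stdint.h>

    #define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28)
    #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
    #define GET_DMA_T(h)     (((h) & 0x00800000) >> 23)

    int main(void)
    {
        uint32_t header = 0x20000001;     /* DMA_PACKET_WRITE, one dword */
        assert(GET_DMA_CMD(header) == 0x2);
        assert(GET_DMA_COUNT(header) == 1);
        assert(GET_DMA_T(header) == 0);   /* linear, not tiled */
        return 0;
    }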
+
+/**
+ * r600_dma_cs_parse() - parse the DMA IB
+ * @p:         parser structure holding parsing context.
+ *
+ * Parses the DMA IB from the CS ioctl and updates
+ * the GPU addresses based on the reloc information and
+ * checks for errors (r6xx-r7xx).
+ * Returns 0 for success and an error on failure.
+ **/
+int r600_dma_cs_parse(struct radeon_cs_parser *p)
+{
+       struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+       struct radeon_cs_reloc *src_reloc, *dst_reloc;
+       u32 header, cmd, count, tiled;
+       volatile u32 *ib = p->ib.ptr;
+       u32 idx, idx_value;
+       u64 src_offset, dst_offset;
+       int r;
+
+       do {
+               if (p->idx >= ib_chunk->length_dw) {
+                       DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+                                 p->idx, ib_chunk->length_dw);
+                       return -EINVAL;
+               }
+               idx = p->idx;
+               header = radeon_get_ib_value(p, idx);
+               cmd = GET_DMA_CMD(header);
+               count = GET_DMA_COUNT(header);
+               tiled = GET_DMA_T(header);
+
+               switch (cmd) {
+               case DMA_PACKET_WRITE:
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_WRITE\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               dst_offset = ib[idx+1];
+                               dst_offset <<= 8;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               p->idx += count + 5;
+                       } else {
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += count + 3;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_COPY:
+                       r = r600_dma_cs_next_reloc(p, &src_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_COPY\n");
+                               return -EINVAL;
+                       }
+                       if (tiled) {
+                               idx_value = radeon_get_ib_value(p, idx + 2);
+                               /* detile bit */
+                               if (idx_value & (1 << 31)) {
+                                       /* tiled src, linear dst */
+                                       src_offset = ib[idx+1];
+                                       src_offset <<= 8;
+                                       ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+                                       dst_offset = ib[idx+5];
+                                       dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               } else {
+                                       /* linear src, tiled dst */
+                                       src_offset = ib[idx+5];
+                                       src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+                                       ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                                       ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+                                       dst_offset = ib[idx+1];
+                                       dst_offset <<= 8;
+                                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+                               }
+                               p->idx += 7;
+                       } else {
+                               src_offset = ib[idx+2];
+                               src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+                               dst_offset = ib[idx+1];
+                               dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+                               ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+                               ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+                               ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+                               p->idx += 5;
+                       }
+                       if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+                               dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+                                        src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+                               return -EINVAL;
+                       }
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       break;
+               case DMA_PACKET_CONSTANT_FILL:
+                       if (p->family < CHIP_RV770) {
+                               DRM_ERROR("Constant Fill is 7xx only !\n");
+                               return -EINVAL;
+                       }
+                       r = r600_dma_cs_next_reloc(p, &dst_reloc);
+                       if (r) {
+                               DRM_ERROR("bad DMA_PACKET_WRITE\n");
+                               return -EINVAL;
+                       }
+                       dst_offset = ib[idx+1];
+                       dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+                       if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+                               dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+                                        dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+                               return -EINVAL;
+                       }
+                       ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+                       ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
+                       p->idx += 4;
+                       break;
+               case DMA_PACKET_NOP:
+                       p->idx += 1;
+                       break;
+               default:
+                       DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+                       return -EINVAL;
+               }
+       } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+#if 0
+       for (r = 0; r < p->ib.length_dw; r++) {
+               printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
+               mdelay(1);
+       }
+#endif
+       return 0;
+}
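
The constant-fill packet is the odd one out in the parser above: its upper 8 address bits sit in bits [23:16] of the third argument dword rather than [7:0], hence the 0x00ff0000 mask and the 16-bit shifts. A quick round-trip check of that encoding, with a hypothetical 40-bit address:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t addr = 0x1234567890ull;  /* hypothetical 40-bit address */
        uint32_t dw1 = (uint32_t)addr & 0xfffffffc;                  /* [31:2]  */
        uint32_t dw3 = ((uint32_t)(addr >> 32) << 16) & 0x00ff0000;  /* [39:32] */

        /* parser-side reconstruction, as in DMA_PACKET_CONSTANT_FILL above */
        uint64_t back = dw1 | ((uint64_t)(dw3 & 0x00ff0000) << 16);
        assert(back == addr);
        return 0;
    }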
index 2b960cb..909219b 100644 (file)
 #define R600_CONFIG_F0_BASE                                     0x542C
 #define R600_CONFIG_APER_SIZE                                   0x5430
 
+#define        R600_BIF_FB_EN                                          0x5490
+#define                R600_FB_READ_EN                                 (1 << 0)
+#define                R600_FB_WRITE_EN                                (1 << 1)
+
+#define R600_CITF_CNTL                                         0x200c
+#define                R600_BLACKOUT_MASK                              0x00000003
+
+#define R700_MC_CITF_CNTL                                      0x25c0
+
 #define R600_ROM_CNTL                              0x1600
 #       define R600_SCK_OVERWRITE                  (1 << 1)
 #       define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
index fa6f370..4a53402 100644 (file)
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
 #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
 
+/* async DMA */
+#define DMA_TILING_CONFIG                                 0x3ec4
+#define DMA_CONFIG                                        0x3e4c
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_STATUS_REG                                    0xd034
+#       define DMA_IDLE                                   (1 << 0)
+#define DMA_SEM_INCOMPLETE_TIMER_CNTL                     0xd044
+#define DMA_SEM_WAIT_FAIL_TIMER_CNTL                      0xd048
+#define DMA_MODE                                          0xd0bc
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFF) << 0))
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_CONSTANT_FILL                          0xd /* 7xx only */
+#define        DMA_PACKET_NOP                                    0xf
+
 #define IH_RB_CNTL                                        0x3e00
 #       define IH_RB_ENABLE                               (1 << 0)
-#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_SIZE(x)                              ((x) << 1) /* log2 */
 #       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
 #       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
 #       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
 #define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
 
 #define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_DMA                             (1 << 12)
 #       define SOFT_RESET_RLC                             (1 << 13)
+#       define RV770_SOFT_RESET_DMA                       (1 << 20)
 
 #define CP_INT_CNTL                                       0xc124
 #       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_INDIRECT_BUFFER                         0x32
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO [31:0]
+ * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
index 8c42d54..5dc744d 100644 (file)
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
 #define RADEON_BIOS_NUM_SCRATCH                        8
 
 /* max number of rings */
-#define RADEON_NUM_RINGS                       3
+#define RADEON_NUM_RINGS                       5
 
 /* fence seq are set to this number when signaled */
 #define RADEON_FENCE_SIGNALED_SEQ              0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
 #define CAYMAN_RING_TYPE_CP1_INDEX             1
 #define CAYMAN_RING_TYPE_CP2_INDEX             2
 
+/* R600+ has an async dma ring */
+#define R600_RING_TYPE_DMA_INDEX               3
+/* cayman adds a second async dma ring */
+#define CAYMAN_RING_TYPE_DMA1_INDEX            4
+
 /* hardcode those limit for now */
 #define RADEON_VA_IB_OFFSET                    (1 << 20)
 #define RADEON_VA_RESERVED_SIZE                        (8 << 20)
@@ -313,6 +318,7 @@ struct radeon_bo {
        struct list_head                list;
        /* Protected by tbo.reserved */
        u32                             placements[3];
+       u32                             busy_placements[3];
        struct ttm_placement            placement;
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
@@ -787,6 +793,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
 void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
+/* r600 async dma */
+void r600_dma_stop(struct radeon_device *rdev);
+int r600_dma_resume(struct radeon_device *rdev);
+void r600_dma_fini(struct radeon_device *rdev);
+
+void cayman_dma_stop(struct radeon_device *rdev);
+int cayman_dma_resume(struct radeon_device *rdev);
+void cayman_dma_fini(struct radeon_device *rdev);
+
 /*
  * CS.
  */
@@ -824,6 +839,7 @@ struct radeon_cs_parser {
        struct radeon_cs_reloc  *relocs;
        struct radeon_cs_reloc  **relocs_ptr;
        struct list_head        validated;
+       unsigned                dma_reloc_idx;
        /* indices of various chunks */
        int                     chunk_ib_idx;
        int                     chunk_relocs_idx;
@@ -883,7 +899,9 @@ struct radeon_wb {
 #define RADEON_WB_CP_RPTR_OFFSET 1024
 #define RADEON_WB_CP1_RPTR_OFFSET 1280
 #define RADEON_WB_CP2_RPTR_OFFSET 1536
+#define R600_WB_DMA_RPTR_OFFSET   1792
 #define R600_WB_IH_WPTR_OFFSET   2048
+#define CAYMAN_WB_DMA1_RPTR_OFFSET   2304
 #define R600_WB_EVENT_OFFSET     3072
 
 /**
@@ -1539,6 +1557,8 @@ struct radeon_device {
        /* Register mmio */
        resource_size_t                 rmmio_base;
        resource_size_t                 rmmio_size;
+       /* protects concurrent MM_INDEX/DATA based register access */
+       spinlock_t mmio_idx_lock;
        void __iomem                    *rmmio;
        radeon_rreg_t                   mc_rreg;
        radeon_wreg_t                   mc_wreg;
@@ -1614,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+                     bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+                 bool always_indirect);
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
@@ -1631,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
 #define RREG16(reg) readw((rdev->rmmio) + (reg))
 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
                tmp_ |= ((val) & ~(mask));                      \
                WREG32_PLL(reg, tmp_);                          \
        } while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
 
index 654520b..596bcbe 100644 (file)
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &r600_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &r600_dma_ring_ib_execute,
+                       .emit_fence = &r600_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &r600_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &r600_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &r600_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
+               }
        },
        .irq = {
                .set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
                        .ring_test = &r600_ring_test,
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &evergreen_dma_ring_ib_execute,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &r600_dma_is_lockup,
                }
        },
        .irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                        .vm_flush = &cayman_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
                }
        },
        .irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
        .vm = {
                .init = &cayman_vm_init,
                .fini = &cayman_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &cayman_vm_set_page,
        },
        .ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &evergreen_gpu_is_lockup,
                        .vm_flush = &cayman_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = &evergreen_dma_cs_parse,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &cayman_dma_vm_flush,
                }
        },
        .irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
        .copy = {
                .blit = &r600_copy_blit,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = &r600_copy_blit,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &evergreen_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &evergreen_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
        .vm = {
                .init = &si_vm_init,
                .fini = &si_vm_fini,
-               .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
                .set_page = &si_vm_set_page,
        },
        .ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
                        .ib_test = &r600_ib_test,
                        .is_lockup = &si_gpu_is_lockup,
                        .vm_flush = &si_vm_flush,
+               },
+               [R600_RING_TYPE_DMA_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = NULL,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &si_dma_vm_flush,
+               },
+               [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+                       .ib_execute = &cayman_dma_ring_ib_execute,
+                       .ib_parse = &evergreen_dma_ib_parse,
+                       .emit_fence = &evergreen_dma_fence_ring_emit,
+                       .emit_semaphore = &r600_dma_semaphore_ring_emit,
+                       .cs_parse = NULL,
+                       .ring_test = &r600_dma_ring_test,
+                       .ib_test = &r600_dma_ib_test,
+                       .is_lockup = &cayman_dma_is_lockup,
+                       .vm_flush = &si_dma_vm_flush,
                }
        },
        .irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
        .copy = {
                .blit = NULL,
                .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .dma = NULL,
-               .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
-               .copy = NULL,
-               .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+               .dma = &si_copy_dma,
+               .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
+               .copy = &si_copy_dma,
+               .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
        },
        .surface = {
                .set_reg = r600_set_surface_reg,
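
[Note] The hunks above all follow one pattern: each ASIC's per-ring ops table gains a DMA entry, and the generic copy pointers are repointed from the CP blit path to the new async DMA engine (on cayman/trinity/SI the VM page-table ring moves to DMA as well). A minimal user-space model of that dispatch pattern — names are illustrative, not the driver's actual types:

    #include <stdio.h>

    enum ring_idx { RING_GFX, RING_DMA, NUM_RINGS };

    struct ring_ops {
        int (*copy)(unsigned int pages);   /* NULL if this ring cannot copy */
    };

    struct asic {
        struct ring_ops ring[NUM_RINGS];
        int (*copy)(unsigned int pages);   /* preferred copy method */
        enum ring_idx copy_ring_index;
    };

    static int blit_copy(unsigned int pages) { printf("blit %u pages\n", pages); return 0; }
    static int dma_copy(unsigned int pages)  { printf("dma  %u pages\n", pages); return 0; }

    /* prefer the DMA engine when wired up, mirroring the .copy and
     * .copy_ring_index switches in the hunks above */
    static void asic_pick_copy(struct asic *a)
    {
        if (a->ring[RING_DMA].copy) {
            a->copy = a->ring[RING_DMA].copy;
            a->copy_ring_index = RING_DMA;
        } else {
            a->copy = a->ring[RING_GFX].copy;
            a->copy_ring_index = RING_GFX;
        }
    }

    int main(void)
    {
        struct asic a = { .ring = { [RING_GFX] = { blit_copy },
                                    [RING_DMA] = { dma_copy } } };
        asic_pick_copy(&a);
        return a.copy(16);
    }
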
index 5e3a0e5..5f4882c 100644 (file)
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
 struct rv515_mc_save {
        u32 vga_render_control;
        u32 vga_hdp_control;
+       bool crtc_enabled[2];
 };
 
 int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
 void r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *cp,
                              struct radeon_semaphore *semaphore,
                              bool emit_wait);
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+                             struct radeon_fence *fence);
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+                                 struct radeon_ring *ring,
+                                 struct radeon_semaphore *semaphore,
+                                 bool emit_wait);
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t offset, uint32_t obj_size);
 void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+                 uint64_t src_offset, uint64_t dst_offset,
+                 unsigned num_gpu_pages, struct radeon_fence **fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
 int evergreen_irq_set(struct radeon_device *rdev);
 int evergreen_irq_process(struct radeon_device *rdev);
 extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
 void evergreen_disable_interrupt_state(struct radeon_device *rdev);
 int evergreen_blit_init(struct radeon_device *rdev);
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+                                  struct radeon_fence *fence);
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+                                  struct radeon_ib *ib);
+int evergreen_copy_dma(struct radeon_device *rdev,
+                      uint64_t src_offset, uint64_t dst_offset,
+                      unsigned num_gpu_pages,
+                      struct radeon_fence **fence);
 
 /*
  * cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
                        uint64_t addr, unsigned count,
                        uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+                               struct radeon_ib *ib);
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 /* DCE6 - SI */
 void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+               uint64_t src_offset, uint64_t dst_offset,
+               unsigned num_gpu_pages,
+               struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
 #endif
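
[Note] For orientation, the prototypes above slot into the driver's fixed ring numbering; in the 3.8-era radeon.h the layout is roughly as below (treat the exact values as informational, not normative):

    enum {
        RADEON_RING_TYPE_GFX_INDEX  = 0,   /* CP: graphics + compute */
        CAYMAN_RING_TYPE_CP1_INDEX  = 1,   /* secondary CPs, cayman+ */
        CAYMAN_RING_TYPE_CP2_INDEX  = 2,
        R600_RING_TYPE_DMA_INDEX    = 3,   /* async DMA, r600 and newer */
        CAYMAN_RING_TYPE_DMA1_INDEX = 4,   /* second DMA engine, cayman+ */
        RADEON_NUM_RINGS            = 5
    };
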
index 45b660b..4af8912 100644 (file)
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
        while (ram--) {
                addr = ram * 1024 * 1024;
                /* write to each page */
-               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-               WREG32(RADEON_MM_DATA, 0xdeadbeef);
+               WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
                /* read back and verify */
-               WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-               if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+               if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
                        return 0;
        }
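
[Note] WREG32_IDX/RREG32_IDX replace the open-coded MM_INDEX/MM_DATA pairs: the two register writes are not atomic, so any concurrent user of the window can corrupt the pairing. The helpers funnel every access through one place that can hold a lock across the pair — the mmio_idx_lock initialized in the radeon_device.c hunk further down. A rough user-space model, with a pthread mutex standing in for the kernel spinlock and an array standing in for the BAR mapping:

    #include <pthread.h>
    #include <stdint.h>

    enum { MM_INDEX = 0, MM_DATA = 1 };      /* word offsets, illustrative */
    static volatile uint32_t mmio[2];        /* placeholder for the real BAR;
                                                hardware decodes the index */
    static pthread_mutex_t mmio_idx_lock = PTHREAD_MUTEX_INITIALIZER;

    static void wreg32_idx(uint32_t reg, uint32_t v)
    {
        pthread_mutex_lock(&mmio_idx_lock);
        mmio[MM_INDEX] = reg;                /* select the target register */
        mmio[MM_DATA]  = v;                  /* write through the window */
        pthread_mutex_unlock(&mmio_idx_lock);
    }

    static uint32_t rreg32_idx(uint32_t reg)
    {
        uint32_t v;

        pthread_mutex_lock(&mmio_idx_lock);
        mmio[MM_INDEX] = reg;
        v = mmio[MM_DATA];
        pthread_mutex_unlock(&mmio_idx_lock);
        return v;
    }
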
 
index b884c36..47bf162 100644 (file)
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
                case DRM_MODE_CONNECTOR_HDMIA:
                case DRM_MODE_CONNECTOR_HDMIB:
                case DRM_MODE_CONNECTOR_DisplayPort:
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_property,
                                                      UNDERSCAN_OFF);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_hborder_property,
                                                      0);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_vborder_property,
                                                      0);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
                                connector->doublescan_allowed = false;
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.load_detect_property,
                                                              1);
                        }
                        break;
                case DRM_MODE_CONNECTOR_LVDS:
                case DRM_MODE_CONNECTOR_eDP:
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        /* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                        /* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        subpixel_order = SubPixelHorizontalRGB;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.load_detect_property,
                                                              1);
                        }
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
                                        DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
                        subpixel_order = SubPixelHorizontalRGB;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.coherent_mode_property,
                                                      1);
                        if (ASIC_IS_AVIVO(rdev)) {
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_property,
                                                              UNDERSCAN_OFF);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_hborder_property,
                                                              0);
-                               drm_connector_attach_property(&radeon_connector->base,
+                               drm_object_attach_property(&radeon_connector->base.base,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
                        drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
                        drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.tv_std_property,
                                                      radeon_atombios_get_tv_info(rdev));
                        /* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                if (!radeon_connector->ddc_bus)
                                        DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                        }
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      dev->mode_config.scaling_mode_property,
                                                      DRM_MODE_SCALE_FULLSCREEN);
                        subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                /* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
                radeon_connector->dac_load_detect = true;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              1);
                /* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                }
                if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                        radeon_connector->dac_load_detect = true;
-                       drm_connector_attach_property(&radeon_connector->base,
+                       drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
                }
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
                 */
                if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
                        radeon_connector->dac_load_detect = false;
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.load_detect_property,
                                              radeon_connector->dac_load_detect);
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              rdev->mode_info.tv_std_property,
                                              radeon_combios_get_tv_info(rdev));
                /* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
                        if (!radeon_connector->ddc_bus)
                                DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
                }
-               drm_connector_attach_property(&radeon_connector->base,
+               drm_object_attach_property(&radeon_connector->base.base,
                                              dev->mode_config.scaling_mode_property,
                                              DRM_MODE_SCALE_FULLSCREEN);
                subpixel_order = SubPixelHorizontalRGB;
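
[Note] The drm_connector_attach_property -> drm_object_attach_property rename running through this file reflects properties moving from the connector wrapper down to the embedded struct drm_mode_object, hence the extra .base at every call site. A structural sketch with the types reduced to the bare minimum:

    struct mode_object { unsigned int prop_mask; };        /* drm_mode_object  */
    struct connector   { struct mode_object base; };       /* drm_connector    */
    struct radeon_conn { struct connector base; };         /* radeon wrapper   */

    static void object_attach_property(struct mode_object *obj, unsigned int prop)
    {
        obj->prop_mask |= prop;   /* the generic object now owns the list */
    }

    /* old: connector_attach_property(&rc->base, prop);
     * new: object_attach_property(&rc->base.base, prop); */
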
index 8b2797d..9143fc4 100644 (file)
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
        }
 }
 
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
-       u32 ret;
-
-       if (addr < 0x10000)
-               ret = DRM_READ32(dev_priv->mmio, addr);
-       else {
-               DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
-               ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
-       }
-
-       return ret;
-}
-
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
        u32 ret;
index 41672cc..396baba 100644 (file)
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                return 0;
        }
        chunk = &p->chunks[p->chunk_relocs_idx];
+       p->dma_reloc_idx = 0;
        /* FIXME: we assume that each relocs use 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
+       case RADEON_CS_RING_DMA:
+               if (p->rdev->family >= CHIP_CAYMAN) {
+                       if (p->priority > 0)
+                               p->ring = R600_RING_TYPE_DMA_INDEX;
+                       else
+                               p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+               } else if (p->rdev->family >= CHIP_R600) {
+                       p->ring = R600_RING_TYPE_DMA_INDEX;
+               } else {
+                       return -EINVAL;
+               }
+               break;
        }
        return 0;
 }
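
[Note] The new RADEON_CS_RING_DMA case maps a ring class requested by userspace onto a concrete ring: cayman and newer spread submissions across their two DMA engines by priority, r600-class hardware has a single DMA ring, and anything older has none and gets -EINVAL. The same logic as a standalone function (family values are illustrative stand-ins):

    #include <errno.h>

    enum family { FAM_R300 = 3, FAM_R600 = 6, FAM_CAYMAN = 9 };
    enum ring   { RING_DMA0, RING_DMA1 };

    static int pick_dma_ring(enum family fam, int priority, enum ring *out)
    {
        if (fam >= FAM_CAYMAN)
            *out = priority > 0 ? RING_DMA0 : RING_DMA1;  /* two engines */
        else if (fam >= FAM_R600)
            *out = RING_DMA0;                             /* single engine */
        else
            return -EINVAL;                               /* no async DMA */
        return 0;
    }
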
index 0fe56c9..ad6df62 100644 (file)
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
        struct radeon_device *rdev = crtc->dev->dev_private;
 
        if (ASIC_IS_DCE4(rdev)) {
-               WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-               WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-                      EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+               WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+                          EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+                          EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
        } else if (ASIC_IS_AVIVO(rdev)) {
-               WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
-               WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+               WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+                          (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
        } else {
+               u32 reg;
                switch (radeon_crtc->crtc_id) {
                case 0:
-                       WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+                       reg = RADEON_CRTC_GEN_CNTL;
                        break;
                case 1:
-                       WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+                       reg = RADEON_CRTC2_GEN_CNTL;
                        break;
                default:
                        return;
                }
-               WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+               WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
        }
 }
 
index e2f5f88..49b0659 100644 (file)
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
+       spin_lock_init(&rdev->mmio_idx_lock);
        rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
        rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
        rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
index bfa2a60..310c0e5 100644 (file)
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        work->old_rbo = rbo;
        obj = new_radeon_fb->obj;
        rbo = gem_to_radeon_bo(obj);
+
+       spin_lock(&rbo->tbo.bdev->fence_lock);
        if (rbo->tbo.sync_obj)
                work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+       spin_unlock(&rbo->tbo.bdev->fence_lock);
+
        INIT_WORK(&work->work, radeon_unpin_work_func);
 
        /* We borrow the event spin lock for protecting unpin_work */
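
[Note] The page-flip fix wraps the sync_obj peek in the TTM device's fence_lock: without it, another thread can swap or free the fence between the NULL check and the ref. The shape of the fix, modeled in user space:

    #include <pthread.h>
    #include <stddef.h>

    struct fence  { int refcount; };
    struct bo_dev { pthread_mutex_t fence_lock; };
    struct bo     { struct bo_dev *bdev; struct fence *sync_obj; };

    static struct fence *fence_ref(struct fence *f) { f->refcount++; return f; }

    static struct fence *grab_flip_fence(struct bo *bo)
    {
        struct fence *f = NULL;

        pthread_mutex_lock(&bo->bdev->fence_lock);
        if (bo->sync_obj)                 /* check and ref under one lock */
            f = fence_ref(bo->sync_obj);
        pthread_mutex_unlock(&bo->bdev->fence_lock);
        return f;
    }
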
index 07eb84e..9b1a727 100644 (file)
  *   2.22.0 - r600 only: RESOLVE_BOX allowed
  *   2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
  *   2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
+ *   2.25.0 - eg+: new info request for num SE and num SH
+ *   2.26.0 - r600-eg: fix htile size computation
+ *   2.27.0 - r600-SI: Add CS ioctl support for async DMA
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       24
+#define KMS_DRIVER_MINOR       27
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +284,15 @@ static struct drm_driver driver_old = {
 
 static struct drm_driver kms_driver;
 
-static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
+static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 {
        struct apertures_struct *ap;
        bool primary = false;
 
        ap = alloc_apertures(1);
+       if (!ap)
+               return -ENOMEM;
+
        ap->ranges[0].base = pci_resource_start(pdev, 0);
        ap->ranges[0].size = pci_resource_len(pdev, 0);
 
@@ -295,13 +301,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
 #endif
        remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
        kfree(ap);
+
+       return 0;
 }
 
 static int __devinit
 radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       int ret;
+
        /* Get rid of things like offb */
-       radeon_kick_out_firmware_fb(pdev);
+       ret = radeon_kick_out_firmware_fb(pdev);
+       if (ret)
+               return ret;
 
        return drm_get_pci_dev(pdev, ent, &kms_driver);
 }
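
[Note] radeon_kick_out_firmware_fb() previously dereferenced the result of alloc_apertures() without a NULL check; it now returns -ENOMEM and the probe path forwards the error instead of continuing with a half-initialized device. A compressed model of the fixed control flow:

    #include <errno.h>
    #include <stdlib.h>

    struct apertures { int nranges; };

    static struct apertures *alloc_apertures_model(int n)
    {
        (void)n;
        return calloc(1, sizeof(struct apertures));  /* may return NULL */
    }

    static int kick_out_firmware_fb(void)
    {
        struct apertures *ap = alloc_apertures_model(1);

        if (!ap)
            return -ENOMEM;            /* was: silent NULL dereference */
        /* ... remove conflicting firmware framebuffers ... */
        free(ap);
        return 0;
    }

    static int probe(void)
    {
        int ret = kick_out_firmware_fb();

        if (ret)
            return ret;                /* abort the probe on failure */
        /* ... continue with normal device setup ... */
        return 0;
    }
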
index a1b59ca..e7fdf16 100644 (file)
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
 extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
 extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
 extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
 
 extern void radeon_freelist_reset(struct drm_device * dev);
 extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
index 22bd6c2..410a975 100644 (file)
@@ -772,7 +772,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
        int r;
 
        radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
-       if (rdev->wb.use_event) {
+       if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
                rdev->fence_drv[ring].scratch_reg = 0;
                index = R600_WB_EVENT_OFFSET + ring * 4;
        } else {
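
[Note] The widened condition matters for the new rings: fences are signalled either through a scratch register or a memory writeback slot, and the DMA rings cannot write scratch registers, so any such ring is forced onto the writeback path regardless of rdev->wb.use_event. Reduced to a predicate:

    #include <stdbool.h>

    /* stand-in: in this era only the CP rings can emit scratch-register
     * writes; the new DMA rings cannot */
    static bool ring_supports_scratch_reg(int ring) { return ring < 3; }

    static bool fence_uses_writeback(bool wb_use_event, int ring)
    {
        return wb_use_event || !ring_supports_scratch_reg(ring);
    }
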
index 4debd60..6e24f84 100644 (file)
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 {
        struct radeon_bo_va *bo_va;
 
-       BUG_ON(!atomic_read(&bo->tbo.reserved));
        list_for_each_entry(bo_va, &bo->va, bo_list) {
                bo_va->valid = false;
        }
index dc781c4..9c312f9 100644 (file)
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_MAX_SE:
+               if (rdev->family >= CHIP_TAHITI)
+                       value = rdev->config.si.max_shader_engines;
+               else if (rdev->family >= CHIP_CAYMAN)
+                       value = rdev->config.cayman.max_shader_engines;
+               else if (rdev->family >= CHIP_CEDAR)
+                       value = rdev->config.evergreen.num_ses;
+               else
+                       value = 1;
+               break;
+       case RADEON_INFO_MAX_SH_PER_SE:
+               if (rdev->family >= CHIP_TAHITI)
+                       value = rdev->config.si.max_sh_per_se;
+               else
+                       return -EINVAL;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
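
[Note] From userspace the two new requests are reachable through the existing DRM_RADEON_INFO ioctl; a sketch along the lines of libdrm's query helper, assuming headers from a kernel that carries this change:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    static int radeon_get_value(int fd, uint32_t request, uint32_t *out)
    {
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = request;
        info.value = (uintptr_t)out;   /* kernel writes the result here */
        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    }

    /* e.g. radeon_get_value(fd, RADEON_INFO_MAX_SE, &num_se);
     * older kernels, and pre-SI parts for MAX_SH_PER_SE, return -EINVAL. */
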
index 92c5f47..d818b50 100644 (file)
@@ -427,7 +427,7 @@ struct radeon_connector_atom_dig {
        uint32_t igp_lane_info;
        /* displayport */
        struct radeon_i2c_chan *dp_i2c_bus;
-       u8 dpcd[8];
+       u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 dp_sink_type;
        int dp_clock;
        int dp_lane_count;
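
[Note] The dpcd resize is a buffer-overflow guard: the array is sized to match the full DP receiver-capability block the DP code reads, DP_RECEIVER_CAP_SIZE bytes (0xf in drm_dp_helper.h), rather than a magic 8. As a fragment:

    #define DP_RECEIVER_CAP_SIZE 0xf         /* value from drm_dp_helper.h */

    struct dig_connector_model {
        unsigned char dpcd[DP_RECEIVER_CAP_SIZE];  /* was [8]: 7 bytes short */
    };
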
index b91118c..883c95d 100644 (file)
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
-       rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
-       if (domain & RADEON_GEM_DOMAIN_GTT)
-               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       if (domain & RADEON_GEM_DOMAIN_CPU)
-               rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+       if (domain & RADEON_GEM_DOMAIN_GTT) {
+               if (rbo->rdev->flags & RADEON_IS_AGP) {
+                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+               } else {
+                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+               }
+       }
+       if (domain & RADEON_GEM_DOMAIN_CPU) {
+               if (rbo->rdev->flags & RADEON_IS_AGP) {
+                       rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+               } else {
+                       rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+               }
+       }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
+
+       c = 0;
+       rbo->placement.busy_placement = rbo->busy_placements;
+       if (rbo->rdev->flags & RADEON_IS_AGP) {
+               rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+       } else {
+               rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+       }
        rbo->placement.num_busy_placement = c;
 }
 
@@ -140,7 +157,7 @@ int radeon_bo_create(struct radeon_device *rdev,
        /* Kernel allocation are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
-                       &bo->placement, page_align, 0, !kernel, NULL,
+                       &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
 {
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
-       u32 domain;
        int r;
 
        r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
-                       domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
-                       
-               retry:
-                       radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
-                                               true, false, false);
+                                               true, false);
                        if (unlikely(r)) {
-                               if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
-                                       domain |= RADEON_GEM_DOMAIN_GTT;
-                                       goto retry;
-                               }
                                return r;
                        }
                }
@@ -384,7 +392,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
        int steal;
        int i;
 
-       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       BUG_ON(!radeon_bo_is_reserved(bo));
 
        if (!bo->tiling_flags)
                return 0;
@@ -510,7 +518,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
 {
-       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       BUG_ON(!radeon_bo_is_reserved(bo));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
 {
-       BUG_ON(!atomic_read(&bo->tbo.reserved));
+       BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
 
        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
                        /* hurrah the memory is not visible ! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
-                       r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+                       r = ttm_bo_validate(bo, &rbo->placement, false, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
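
[Note] The placement rework does two things: GTT/CPU placements now pick caching attributes by bus type (write-combined over AGP, cached otherwise), and every BO carries a separate busy_placements list so TTM itself can fall back to GTT under VRAM pressure — which is what lets the manual retry loop be deleted from radeon_bo_list_validate in the same file. The two-list idea in miniature:

    enum domain_flag { PLACE_VRAM = 1, PLACE_GTT = 2 };

    struct placement_model {
        enum domain_flag preferred[2]; int num_preferred;
        enum domain_flag busy[1];      int num_busy;
    };

    static void placement_from_domain(struct placement_model *p, int want_vram)
    {
        p->num_preferred = 0;
        if (want_vram)
            p->preferred[p->num_preferred++] = PLACE_VRAM;
        p->preferred[p->num_preferred++] = PLACE_GTT;

        p->busy[0]  = PLACE_GTT;    /* contention fallback handled by TTM */
        p->num_busy = 1;
    }
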
index 93cd491..5fc86b0 100644 (file)
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
 
 static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
 {
-       return !!atomic_read(&bo->tbo.reserved);
+       return ttm_bo_is_reserved(&bo->tbo);
 }
 
 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
index 587c09a..fda09c9 100644 (file)
 #include "radeon_reg.h"
 #include "radeon.h"
 
+#define RADEON_TEST_COPY_BLIT 1
+#define RADEON_TEST_COPY_DMA  0
+
 
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-void radeon_test_moves(struct radeon_device *rdev)
+static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
 {
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        struct radeon_fence *fence = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned i, n, size;
-       int r;
+       int r, ring;
+
+       switch (flag) {
+       case RADEON_TEST_COPY_DMA:
+               ring = radeon_copy_dma_ring_index(rdev);
+               break;
+       case RADEON_TEST_COPY_BLIT:
+               ring = radeon_copy_blit_ring_index(rdev);
+               break;
+       default:
+               DRM_ERROR("Unknown copy method\n");
+               return;
+       }
 
        size = 1024 * 1024;
 
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(gtt_obj[i]);
 
-               r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               if (ring == R600_RING_TYPE_DMA_INDEX)
+                       r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               else
+                       r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
 
                radeon_bo_kunmap(vram_obj);
 
-               r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               if (ring == R600_RING_TYPE_DMA_INDEX)
+                       r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+               else
+                       r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
                if (r) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
        }
 }
 
+void radeon_test_moves(struct radeon_device *rdev)
+{
+       if (rdev->asic->copy.dma)
+               radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
+       if (rdev->asic->copy.blit)
+               radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
+}
+
 void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
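
[Note] radeon_test_moves() is now a thin driver that repeats the GTT<->VRAM move test once per copy engine actually present, so the new DMA path gets the same coverage as the blit path (and SI, whose .blit is NULL above, skips the blit pass). Reduced to its skeleton:

    struct copy_funcs {
        int (*dma)(void);    /* NULL when the ASIC has no DMA copy */
        int (*blit)(void);   /* NULL when the blit path is absent, e.g. SI */
    };

    static void test_moves(const struct copy_funcs *copy)
    {
        if (copy->dma)
            copy->dma();     /* exercise the async DMA engine */
        if (copy->blit)
            copy->blit();    /* exercise the CP blit engine */
    }
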
index 5ebe1b3..1d8ff2f 100644 (file)
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-                       bool evict, int no_wait_reserve, bool no_wait_gpu,
+                       bool evict, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
 {
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
                        new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
                        &fence);
        /* FIXME: handle copy error */
-       r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-                                     evict, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+                                     evict, no_wait_gpu, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu,
+                               bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-                            interruptible, no_wait_reserve, no_wait_gpu);
+                            interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
@@ -320,7 +320,7 @@ out_cleanup:
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu,
+                               bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-       r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+       r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+                            interruptible, no_wait_gpu);
        if (unlikely(r)) {
                return r;
        }
-       r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+       r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
-       r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+       r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
@@ -359,7 +360,7 @@ out_cleanup:
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
                        bool evict, bool interruptible,
-                       bool no_wait_reserve, bool no_wait_gpu,
+                       bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem)
 {
        struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, interruptible,
-                                       no_wait_reserve, no_wait_gpu, new_mem);
+                                       no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, interruptible,
-                                           no_wait_reserve, no_wait_gpu, new_mem);
+                                           no_wait_gpu, new_mem);
        } else {
-               r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+               r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
        }
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
        }
        return r;
 }
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
-                               bool lazy, bool interruptible)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
 {
        return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
 }
 
-static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int radeon_sync_obj_flush(void *sync_obj)
 {
        return 0;
 }
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
        return radeon_fence_ref((struct radeon_fence *)sync_obj);
 }
 
-static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool radeon_sync_obj_signaled(void *sync_obj)
 {
        return radeon_fence_signaled((struct radeon_fence *)sync_obj);
 }
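
[Note] These hunks track two TTM interface changes from the same window: the no_wait_reserve argument was dropped from the move/validate paths (reservation waiting is no longer a per-call option), and the sync_obj callbacks lost their unused sync_arg cookie, leaving the fence pointer as the only context. The resulting callback shape, sketched:

    struct sync_ops_model {
        int   (*wait)(void *sync_obj, int lazy, int interruptible);
        int   (*flush)(void *sync_obj);
        void *(*ref)(void *sync_obj);
        int   (*signaled)(void *sync_obj);  /* no sync_arg anywhere anymore */
    };
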
index 785d095..2bb6d0e 100644 (file)
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
 static void rv515_gpu_init(struct radeon_device *rdev);
 int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
+static const u32 crtc_offsets[2] =
+{
+       0,
+       AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
+};
+
 void rv515_debugfs(struct radeon_device *rdev)
 {
        if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
 
 void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
+       u32 crtc_enabled, tmp, frame_count, blackout;
+       int i, j;
+
        save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
        save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
 
-       /* Stop all video */
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+       /* disable VGA render */
        WREG32(R_000300_VGA_RENDER_CONTROL, 0);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
-       WREG32(R_006080_D1CRTC_CONTROL, 0);
-       WREG32(R_006880_D2CRTC_CONTROL, 0);
-       WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
-       WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
-       WREG32(R_000330_D1VGA_CONTROL, 0);
-       WREG32(R_000338_D2VGA_CONTROL, 0);
+       /* blank the display controllers */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
+               if (crtc_enabled) {
+                       save->crtc_enabled[i] = true;
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+                       if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
+                               radeon_wait_for_vblank(rdev, i);
+                               tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                               WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                       }
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               } else {
+                       save->crtc_enabled[i] = false;
+               }
+       }
+
+       radeon_mc_wait_for_idle(rdev);
+
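+       /* on r6xx+, blackout the MC to block client access while it is reprogrammed */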
+       if (rdev->family >= CHIP_R600) {
+               if (rdev->family >= CHIP_RV770)
+                       blackout = RREG32(R700_MC_CITF_CNTL);
+               else
+                       blackout = RREG32(R600_CITF_CNTL);
+               if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
+                       /* Block CPU access */
+                       WREG32(R600_BIF_FB_EN, 0);
+                       /* blackout the MC */
+                       blackout |= R600_BLACKOUT_MASK;
+                       if (rdev->family >= CHIP_RV770)
+                               WREG32(R700_MC_CITF_CNTL, blackout);
+                       else
+                               WREG32(R600_CITF_CNTL, blackout);
+               }
+       }
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
 {
-       WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
-       WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
-       /* Unlock host access */
+       u32 tmp, frame_count;
+       int i, j;
+
+       /* update crtc base addresses */
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (rdev->family >= CHIP_RV770) {
+                       if (i == 1) {
+                               WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                               WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                       } else {
+                               WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                               WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+                                      upper_32_bits(rdev->mc.vram_start));
+                       }
+               }
+               WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)rdev->mc.vram_start);
+               WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+                      (u32)rdev->mc.vram_start);
+       }
+       WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+
+       if (rdev->family >= CHIP_R600) {
+               /* unblackout the MC */
+               if (rdev->family >= CHIP_RV770)
+                       tmp = RREG32(R700_MC_CITF_CNTL);
+               else
+                       tmp = RREG32(R600_CITF_CNTL);
+               tmp &= ~R600_BLACKOUT_MASK;
+               if (rdev->family >= CHIP_RV770)
+                       WREG32(R700_MC_CITF_CNTL, tmp);
+               else
+                       WREG32(R600_CITF_CNTL, tmp);
+               /* allow CPU access */
+               WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
+       }
+
+       for (i = 0; i < rdev->num_crtc; i++) {
+               if (save->crtc_enabled[i]) {
+                       tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
+                       tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+                       WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
+                       /* wait for the next frame */
+                       frame_count = radeon_get_vblank_counter(rdev, i);
+                       for (j = 0; j < rdev->usec_timeout; j++) {
+                               if (radeon_get_vblank_counter(rdev, i) != frame_count)
+                                       break;
+                               udelay(1);
+                       }
+               }
+       }
+       /* unlock VGA access */
        WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
index 79814a0..87c979c 100644 (file)
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
        WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
        WREG32(SCRATCH_UMSK, 0);
+       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 }
 
 static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
        WREG32(GB_TILING_CONFIG, gb_tiling_config);
        WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
        WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+       WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
+       WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
 
        WREG32(CGTS_SYS_TCC_DISABLE, 0);
        WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
-       struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+       struct radeon_ring *ring;
        int r;
 
        /* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = r600_irq_init(rdev);
        if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
        }
        r600_irq_set(rdev);
 
+       ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
                             R600_CP_RB_RPTR, R600_CP_RB_WPTR,
                             0, 0xfffff, RADEON_CP_PACKET2);
        if (r)
                return r;
+
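+       /* bring up the async DMA ring alongside the gfx ring */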
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR, DMA_RB_WPTR,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+       if (r)
+               return r;
+
        r = rv770_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = r600_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
        r700_cp_stop(rdev);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+       r600_dma_stop(rdev);
        r600_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
        r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+       rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+       r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                r700_cp_fini(rdev);
+               r600_dma_fini(rdev);
                r600_irq_fini(rdev);
                radeon_wb_fini(rdev);
                radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
 {
        r600_blit_fini(rdev);
        r700_cp_fini(rdev);
+       r600_dma_fini(rdev);
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_ib_pool_fini(rdev);
index b0adfc5..20e29d2 100644 (file)
 #define     PIPE_TILING__SHIFT              1
 #define     PIPE_TILING__MASK               0x0000000e
 
+#define DMA_TILING_CONFIG                               0x3ec8
+#define DMA_TILING_CONFIG2                              0xd0b8
+
 #define        GC_USER_SHADER_PIPE_CONFIG                      0x8954
 #define                INACTIVE_QD_PIPES(x)                            ((x) << 8)
 #define                INACTIVE_QD_PIPES_MASK                          0x0000FF00
 
 #define        WAIT_UNTIL                                      0x8040
 
+/* async DMA */
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+/* async DMA packets */
+#define DMA_PACKET(cmd, t, s, n)       ((((cmd) & 0xF) << 28) |        \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFF) << 0))
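+/* DMA_PACKET field layout: cmd [31:28] | t [23] | s [22] | n (count) [15:0] */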
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
+
 #define        SRBM_STATUS                                     0x0E50
 
 /* DCE 3.2 HDMI */
 #define HDMI_OFFSET0                      (0x7400 - 0x7400)
 #define HDMI_OFFSET1                      (0x7800 - 0x7400)
 
+/* DCE3.2 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0        0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1        0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2        0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3        0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4        0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5        0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6        0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7        0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8        0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9        0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10       0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11       0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12       0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13       0x71fc /* WMA Pro */
+#       define MAX_CHANNELS(x)                            (((x) & 0x7) << 0)
+/* max channels minus one.  7 = 8 channels */
+#       define SUPPORTED_FREQUENCIES(x)                   (((x) & 0xff) << 8)
+#       define DESCRIPTOR_BYTE_2(x)                       (((x) & 0xff) << 16)
+#       define SUPPORTED_FREQUENCIES_STEREO(x)            (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
+#define AZ_HOT_PLUG_CONTROL                               0x7300
+#       define AZ_FORCE_CODEC_WAKE                        (1 << 0)
+#       define PIN0_JACK_DETECTION_ENABLE                 (1 << 4)
+#       define PIN1_JACK_DETECTION_ENABLE                 (1 << 5)
+#       define PIN2_JACK_DETECTION_ENABLE                 (1 << 6)
+#       define PIN3_JACK_DETECTION_ENABLE                 (1 << 7)
+#       define PIN0_UNSOLICITED_RESPONSE_ENABLE           (1 << 8)
+#       define PIN1_UNSOLICITED_RESPONSE_ENABLE           (1 << 9)
+#       define PIN2_UNSOLICITED_RESPONSE_ENABLE           (1 << 10)
+#       define PIN3_UNSOLICITED_RESPONSE_ENABLE           (1 << 11)
+#       define CODEC_HOT_PLUG_ENABLE                      (1 << 12)
+#       define PIN0_AUDIO_ENABLED                         (1 << 24)
+#       define PIN1_AUDIO_ENABLED                         (1 << 25)
+#       define PIN2_AUDIO_ENABLED                         (1 << 26)
+#       define PIN3_AUDIO_ENABLED                         (1 << 27)
+#       define AUDIO_ENABLED                              (1 << 31)
+
+
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
 #define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
 #define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
index 010156d..ef68365 100644 (file)
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
        WREG32(GB_ADDR_CONFIG, gb_addr_config);
        WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
        WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+       WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
        si_tiling_mode_table_init(rdev);
 
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
                radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
                WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
                WREG32(SCRATCH_UMSK, 0);
+               rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+               rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
        }
        udelay(50);
 }
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
        /* enable context1-15 */
        WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(rdev->dummy_page.addr >> 12));
-       WREG32(VM_CONTEXT1_CNTL2, 0);
+       WREG32(VM_CONTEXT1_CNTL2, 4);
        WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+                               RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                               WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                               WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
        si_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
        u32 idx = pkt->idx + 1;
        u32 idx_value = ib[idx];
        u32 start_reg, end_reg, reg, i;
+       u32 command, info;
 
        switch (pkt->opcode) {
        case PACKET3_NOP:
@@ -2633,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
                                return -EINVAL;
                }
                break;
+       case PACKET3_CP_DMA:
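+               /* see the PACKET3_CP_DMA layout: ib[idx + 1] holds the SRC/DST select bits,
+                * ib[idx + 4] the COMMAND word
+                */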
+               command = ib[idx + 4];
+               info = ib[idx + 1];
+               if (command & PACKET3_CP_DMA_CMD_SAS) {
+                       /* src address space is register */
+                       if (((info & 0x60000000) >> 29) == 0) {
+                               start_reg = idx_value << 2;
+                               if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                                       reg = start_reg;
+                                       if (!si_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad SRC register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!si_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad SRC register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               if (command & PACKET3_CP_DMA_CMD_DAS) {
+                       /* dst address space is register */
+                       if (((info & 0x00300000) >> 20) == 0) {
+                               start_reg = ib[idx + 2];
+                               if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                                       reg = start_reg;
+                                       if (!si_vm_reg_valid(reg)) {
+                                               DRM_ERROR("CP DMA Bad DST register\n");
+                                               return -EINVAL;
+                                       }
+                               } else {
+                                       for (i = 0; i < (command & 0x1fffff); i++) {
+                                               reg = start_reg + (4 * i);
+                                               if (!si_vm_reg_valid(reg)) {
+                                                       DRM_ERROR("CP DMA Bad DST register\n");
+                                                       return -EINVAL;
+                                               }
+                                       }
+                               }
+                       }
+               }
+               break;
        default:
                DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
                return -EINVAL;
@@ -2809,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
-       while (count) {
-               unsigned ndw = 2 + count * 2;
-               if (ndw > 0x3FFE)
-                       ndw = 0x3FFE;
-
-               radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
-               radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                        WRITE_DATA_DST_SEL(1)));
-               radeon_ring_write(ring, pe);
-               radeon_ring_write(ring, upper_32_bits(pe));
-               for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-                       uint64_t value;
-                       if (flags & RADEON_VM_PAGE_SYSTEM) {
-                               value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
-                       } else if (flags & RADEON_VM_PAGE_VALID)
-                               value = addr;
-                       else
-                               value = 0;
-                       addr += incr;
-                       value |= r600_flags;
-                       radeon_ring_write(ring, value);
-                       radeon_ring_write(ring, upper_32_bits(value));
+       uint64_t value;
+       unsigned ndw;
+
+       if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
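+               /* CP path: write the page table entries with WRITE_DATA packets */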
+               while (count) {
+                       ndw = 2 + count * 2;
+                       if (ndw > 0x3FFE)
+                               ndw = 0x3FFE;
+
+                       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+                       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                                WRITE_DATA_DST_SEL(1)));
+                       radeon_ring_write(ring, pe);
+                       radeon_ring_write(ring, upper_32_bits(pe));
+                       for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+                               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                       value = radeon_vm_map_gart(rdev, addr);
+                                       value &= 0xFFFFFFFFFFFFF000ULL;
+                               } else if (flags & RADEON_VM_PAGE_VALID) {
+                                       value = addr;
+                               } else {
+                                       value = 0;
+                               }
+                               addr += incr;
+                               value |= r600_flags;
+                               radeon_ring_write(ring, value);
+                               radeon_ring_write(ring, upper_32_bits(value));
+                       }
+               }
+       } else {
+               /* DMA */
+               if (flags & RADEON_VM_PAGE_SYSTEM) {
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
+
+                               /* for non-physically contiguous pages (system) */
+                               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+                               radeon_ring_write(ring, pe);
+                               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                               for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                                               value = radeon_vm_map_gart(rdev, addr);
+                                               value &= 0xFFFFFFFFFFFFF000ULL;
+                                       } else if (flags & RADEON_VM_PAGE_VALID) {
+                                               value = addr;
+                                       } else {
+                                               value = 0;
+                                       }
+                                       addr += incr;
+                                       value |= r600_flags;
+                                       radeon_ring_write(ring, value);
+                                       radeon_ring_write(ring, upper_32_bits(value));
+                               }
+                       }
+               } else {
+                       while (count) {
+                               ndw = count * 2;
+                               if (ndw > 0xFFFFE)
+                                       ndw = 0xFFFFE;
+
+                               if (flags & RADEON_VM_PAGE_VALID)
+                                       value = addr;
+                               else
+                                       value = 0;
+                               /* for physically contiguous pages (vram) */
+                               radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+                               radeon_ring_write(ring, pe); /* dst addr */
+                               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+                               radeon_ring_write(ring, r600_flags); /* mask */
+                               radeon_ring_write(ring, 0);
+                               radeon_ring_write(ring, value); /* value */
+                               radeon_ring_write(ring, upper_32_bits(value));
+                               radeon_ring_write(ring, incr); /* increment size */
+                               radeon_ring_write(ring, 0);
+                               pe += ndw * 4;
+                               addr += (ndw / 2) * incr;
+                               count -= ndw / 2;
+                       }
                }
        }
 }
@@ -2880,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, 0x0);
 }
 
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+       struct radeon_ring *ring = &rdev->ring[ridx];
+
+       if (vm == NULL)
+               return;
+
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       if (vm->id < 8) {
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+       } else {
+               radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+       }
+       radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+       /* flush hdp cache */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+       radeon_ring_write(ring, 1);
+
+       /* bits 0-7 are the VM contexts 0-7 */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+       radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+       radeon_ring_write(ring, 1 << vm->id);
+}
+
 /*
  * RLC
  */
@@ -3048,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
        WREG32(CP_INT_CNTL_RING1, 0);
        WREG32(CP_INT_CNTL_RING2, 0);
+       tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+       tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
        WREG32(GRBM_INT_CNTL, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
        u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
        u32 grbm_int_cntl = 0;
        u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+       u32 dma_cntl, dma_cntl1;
 
        if (!rdev->irq.installed) {
                WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
        hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
        hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
+       dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+       dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
        /* enable CP interrupts on all rings */
        if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
                DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
                DRM_DEBUG("si_irq_set: sw int cp2\n");
                cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
        }
+       if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+               DRM_DEBUG("si_irq_set: sw int dma\n");
+               dma_cntl |= TRAP_ENABLE;
+       }
+
+       if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+               DRM_DEBUG("si_irq_set: sw int dma1\n");
+               dma_cntl1 |= TRAP_ENABLE;
+       }
        if (rdev->irq.crtc_vblank_int[0] ||
            atomic_read(&rdev->irq.pflip[0])) {
                DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
        WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
        WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
 
+       WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+       WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
        WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
        WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3849,16 @@ restart_ih:
                                break;
                        }
                        break;
+               case 146:
+               case 147:
+                       dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+                       dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+                               RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       break;
                case 176: /* RINGID0 CP_INT */
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
                        break;
@@ -3707,9 +3882,17 @@ restart_ih:
                                break;
                        }
                        break;
+               case 224: /* DMA trap event */
+                       DRM_DEBUG("IH: DMA trap\n");
+                       radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+                       break;
                case 233: /* GUI IDLE */
                        DRM_DEBUG("IH: GUI idle\n");
                        break;
+               case 244: /* DMA1 trap event */
+                       DRM_DEBUG("IH: DMA1 trap\n");
+                       radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+                       break;
                default:
                        DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                        break;
@@ -3733,6 +3916,80 @@ restart_ih:
        return IRQ_HANDLED;
 }
 
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+               uint64_t src_offset, uint64_t dst_offset,
+               unsigned num_gpu_pages,
+               struct radeon_fence **fence)
+{
+       struct radeon_semaphore *sem = NULL;
+       int ring_index = rdev->asic->copy.dma_ring_index;
+       struct radeon_ring *ring = &rdev->ring[ring_index];
+       u32 size_in_bytes, cur_size_in_bytes;
+       int i, num_loops;
+       int r = 0;
+
+       r = radeon_semaphore_create(rdev, &sem);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               return r;
+       }
+
+       size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+       num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
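+       /* each copy packet is 5 dwords and moves at most 0xfffff bytes;
+        * the extra 11 dwords leave room for the semaphore sync and fence
+        */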
+       r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+       if (r) {
+               DRM_ERROR("radeon: moving bo (%d).\n", r);
+               radeon_semaphore_free(rdev, &sem, NULL);
+               return r;
+       }
+
+       if (radeon_fence_need_sync(*fence, ring->idx)) {
+               radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                                           ring->idx);
+               radeon_fence_note_sync(*fence, ring->idx);
+       } else {
+               radeon_semaphore_free(rdev, &sem, NULL);
+       }
+
+       for (i = 0; i < num_loops; i++) {
+               cur_size_in_bytes = size_in_bytes;
+               if (cur_size_in_bytes > 0xFFFFF)
+                       cur_size_in_bytes = 0xFFFFF;
+               size_in_bytes -= cur_size_in_bytes;
+               radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+               radeon_ring_write(ring, dst_offset & 0xffffffff);
+               radeon_ring_write(ring, src_offset & 0xffffffff);
+               radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+               radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+               src_offset += cur_size_in_bytes;
+               dst_offset += cur_size_in_bytes;
+       }
+
+       r = radeon_fence_emit(rdev, fence, ring->idx);
+       if (r) {
+               radeon_ring_unlock_undo(rdev, ring);
+               return r;
+       }
+
+       radeon_ring_unlock_commit(rdev, ring);
+       radeon_semaphore_free(rdev, &sem, *fence);
+
+       return r;
+}
+
 /*
  * startup/shutdown callbacks
  */
@@ -3804,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
                return r;
        }
 
+       r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
+       r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+       if (r) {
+               dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+               return r;
+       }
+
        /* Enable IRQ */
        r = si_irq_init(rdev);
        if (r) {
@@ -3834,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       if (r)
+               return r;
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+                            DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+                            DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+                            2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+       if (r)
+               return r;
+
        r = si_cp_load_microcode(rdev);
        if (r)
                return r;
@@ -3841,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
        if (r)
                return r;
 
+       r = cayman_dma_resume(rdev);
+       if (r)
+               return r;
+
        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
 int si_suspend(struct radeon_device *rdev)
 {
        si_cp_enable(rdev, false);
-       rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-       rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-       rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+       cayman_dma_stop(rdev);
        si_irq_suspend(rdev);
        radeon_wb_disable(rdev);
        si_pcie_gart_disable(rdev);
@@ -3962,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
        ring->ring_obj = NULL;
        r600_ring_init(rdev, ring, 1024 * 1024);
 
+       ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
+       ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+       ring->ring_obj = NULL;
+       r600_ring_init(rdev, ring, 64 * 1024);
+
        rdev->ih.ring_obj = NULL;
        r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3974,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
        if (r) {
                dev_err(rdev->dev, "disabling GPU acceleration\n");
                si_cp_fini(rdev);
+               cayman_dma_fini(rdev);
                si_irq_fini(rdev);
                si_rlc_fini(rdev);
                radeon_wb_fini(rdev);
@@ -4002,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
        r600_blit_fini(rdev);
 #endif
        si_cp_fini(rdev);
+       cayman_dma_fini(rdev);
        si_irq_fini(rdev);
        si_rlc_fini(rdev);
        radeon_wb_fini(rdev);
index a8871af..62b4621 100644 (file)
 #define VM_CONTEXT0_CNTL                               0x1410
 #define                ENABLE_CONTEXT                                  (1 << 0)
 #define                PAGE_TABLE_DEPTH(x)                             (((x) & 3) << 1)
+#define                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 3)
 #define                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 4)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT    (1 << 6)
+#define                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT      (1 << 7)
+#define                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 9)
+#define                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 10)
+#define                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 12)
+#define                VALID_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 13)
+#define                READ_PROTECTION_FAULT_ENABLE_INTERRUPT          (1 << 15)
+#define                READ_PROTECTION_FAULT_ENABLE_DEFAULT            (1 << 16)
+#define                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT         (1 << 18)
+#define                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT           (1 << 19)
 #define VM_CONTEXT1_CNTL                               0x1414
 #define VM_CONTEXT0_CNTL2                              0x1430
 #define VM_CONTEXT1_CNTL2                              0x1434
 #define        VM_CONTEXT14_PAGE_TABLE_BASE_ADDR               0x1450
 #define        VM_CONTEXT15_PAGE_TABLE_BASE_ADDR               0x1454
 
+#define        VM_CONTEXT1_PROTECTION_FAULT_ADDR               0x14FC
+#define        VM_CONTEXT1_PROTECTION_FAULT_STATUS             0x14DC
+
 #define VM_INVALIDATE_REQUEST                          0x1478
 #define VM_INVALIDATE_RESPONSE                         0x147c
 
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_COPY_DATA                               0x40
+#define        PACKET3_CP_DMA                                  0x41
+/* 1. header
+ * 2. SRC_ADDR_LO or DATA [31:0]
+ * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
+ *    SRC_ADDR_HI [7:0]
+ * 4. DST_ADDR_LO [31:0]
+ * 5. DST_ADDR_HI [7:0]
+ * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
+ */
+#              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                */
+#              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
+                /* 0 - ME
+                * 1 - PFP
+                */
+#              define PACKET3_CP_DMA_SRC_SEL(x)    ((x) << 29)
+                /* 0 - SRC_ADDR
+                * 1 - GDS
+                * 2 - DATA
+                */
+#              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
+/* COMMAND */
+#              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
+                /* 0 - none
+                * 1 - 8 in 16
+                * 2 - 8 in 32
+                * 3 - 8 in 64
+                */
+#              define PACKET3_CP_DMA_CMD_SAS       (1 << 26)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_DAS       (1 << 27)
+                /* 0 - memory
+                * 1 - register
+                */
+#              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
+#              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#              define PACKET3_CP_DMA_CMD_RAW_WAIT  (1 << 30)
 #define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
 #define        PACKET3_WAIT_ON_AVAIL_BUFFER                    0x8A
 #define        PACKET3_SWITCH_BUFFER                           0x8B
 
+/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
+#define DMA0_REGISTER_OFFSET                              0x0 /* not a register */
+#define DMA1_REGISTER_OFFSET                              0x800 /* not a register */
+
+#define DMA_RB_CNTL                                       0xd000
+#       define DMA_RB_ENABLE                              (1 << 0)
+#       define DMA_RB_SIZE(x)                             ((x) << 1) /* log2 */
+#       define DMA_RB_SWAP_ENABLE                         (1 << 9) /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_ENABLE                  (1 << 12)
+#       define DMA_RPTR_WRITEBACK_SWAP_ENABLE             (1 << 13)  /* 8IN32 */
+#       define DMA_RPTR_WRITEBACK_TIMER(x)                ((x) << 16) /* log2 */
+#define DMA_RB_BASE                                       0xd004
+#define DMA_RB_RPTR                                       0xd008
+#define DMA_RB_WPTR                                       0xd00c
+
+#define DMA_RB_RPTR_ADDR_HI                               0xd01c
+#define DMA_RB_RPTR_ADDR_LO                               0xd020
+
+#define DMA_IB_CNTL                                       0xd024
+#       define DMA_IB_ENABLE                              (1 << 0)
+#       define DMA_IB_SWAP_ENABLE                         (1 << 4)
+#define DMA_IB_RPTR                                       0xd028
+#define DMA_CNTL                                          0xd02c
+#       define TRAP_ENABLE                                (1 << 0)
+#       define SEM_INCOMPLETE_INT_ENABLE                  (1 << 1)
+#       define SEM_WAIT_INT_ENABLE                        (1 << 2)
+#       define DATA_SWAP_ENABLE                           (1 << 3)
+#       define FENCE_SWAP_ENABLE                          (1 << 4)
+#       define CTXEMPTY_INT_ENABLE                        (1 << 28)
+#define DMA_TILING_CONFIG                                0xd0b8
+
+#define DMA_PACKET(cmd, b, t, s, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((b) & 0x1) << 26) |          \
+                                        (((t) & 0x1) << 23) |          \
+                                        (((s) & 0x1) << 22) |          \
+                                        (((n) & 0xFFFFF) << 0))
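+/* DMA_PACKET field layout: cmd [31:28] | b [26] | t [23] | s [22] | n (count) [19:0] */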
+
+#define DMA_IB_PACKET(cmd, vmid, n)    ((((cmd) & 0xF) << 28) |        \
+                                        (((vmid) & 0xF) << 20) |       \
+                                        (((n) & 0xFFFFF) << 0))
+
+#define DMA_PTE_PDE_PACKET(n)          ((2 << 28) |                    \
+                                        (1 << 26) |                    \
+                                        (1 << 21) |                    \
+                                        (((n) & 0xFFFFF) << 0))
+
+/* async DMA Packet types */
+#define        DMA_PACKET_WRITE                                  0x2
+#define        DMA_PACKET_COPY                                   0x3
+#define        DMA_PACKET_INDIRECT_BUFFER                        0x4
+#define        DMA_PACKET_SEMAPHORE                              0x5
+#define        DMA_PACKET_FENCE                                  0x6
+#define        DMA_PACKET_TRAP                                   0x7
+#define        DMA_PACKET_SRBM_WRITE                             0x9
+#define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_NOP                                    0xf
+
 #endif
index 0e7a930..d917a41 100644 (file)
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
        connector->encoder = encoder;
 
        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-       drm_connector_property_set_value(connector,
+       drm_object_property_set_value(&connector->base,
                sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
 
        return 0;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644 (file)
index 0000000..be1daf7
--- /dev/null
@@ -0,0 +1,23 @@
+config DRM_TEGRA
+       tristate "NVIDIA Tegra DRM"
+       depends on DRM && OF && ARCH_TEGRA
+       select DRM_KMS_HELPER
+       select DRM_GEM_CMA_HELPER
+       select DRM_KMS_CMA_HELPER
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       help
+         Choose this option if you have an NVIDIA Tegra SoC.
+
+         To compile this driver as a module, choose M here: the module
+         will be called tegra-drm.
+
+if DRM_TEGRA
+
+config DRM_TEGRA_DEBUG
+       bool "NVIDIA Tegra DRM debug support"
+       help
+         Say yes here to enable debugging support.
+
+endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644 (file)
index 0000000..80f73d1
--- /dev/null
@@ -0,0 +1,7 @@
+ccflags-y := -Iinclude/drm
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := drm.o fb.o dc.o host1x.o
+tegra-drm-y += output.o rgb.o hdmi.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644 (file)
index 0000000..0744103
--- /dev/null
@@ -0,0 +1,834 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <mach/clk.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_dc_window {
+       fixed20_12 x;
+       fixed20_12 y;
+       fixed20_12 w;
+       fixed20_12 h;
+       unsigned int outx;
+       unsigned int outy;
+       unsigned int outw;
+       unsigned int outh;
+       unsigned int stride;
+       unsigned int fmt;
+};
+
+static const struct drm_crtc_funcs tegra_crtc_funcs = {
+       .set_config = drm_crtc_helper_set_config,
+       .destroy = drm_crtc_cleanup,
+};
+
+static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted)
+{
+       return true;
+}
+
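+/* compute the scaler DDA (digital differential analyzer) step from the source to output size ratio */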
+static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
+                                 unsigned int bpp)
+{
+       fixed20_12 outf = dfixed_init(out);
+       u32 dda_inc;
+       int max;
+
+       if (v)
+               max = 15;
+       else {
+               switch (bpp) {
+               case 2:
+                       max = 8;
+                       break;
+
+               default:
+                       WARN_ON_ONCE(1);
+                       /* fallthrough */
+               case 4:
+                       max = 4;
+                       break;
+               }
+       }
+
+       outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
+       inf.full -= dfixed_const(1);
+
+       dda_inc = dfixed_div(inf, outf);
+       dda_inc = min_t(u32, dda_inc, dfixed_const(max));
+
+       return dda_inc;
+}
+
+static inline u32 compute_initial_dda(fixed20_12 in)
+{
+       return dfixed_frac(in);
+}
+
+static int tegra_dc_set_timings(struct tegra_dc *dc,
+                               struct drm_display_mode *mode)
+{
+       /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
+       unsigned int h_ref_to_sync = 0;
+       unsigned int v_ref_to_sync = 0;
+       unsigned long value;
+
+       tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
+
+       value = (v_ref_to_sync << 16) | h_ref_to_sync;
+       tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
+
+       value = ((mode->vsync_end - mode->vsync_start) << 16) |
+               ((mode->hsync_end - mode->hsync_start) <<  0);
+       tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
+
+       value = ((mode->vsync_start - mode->vdisplay) << 16) |
+               ((mode->hsync_start - mode->hdisplay) <<  0);
+       tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
+
+       value = ((mode->vtotal - mode->vsync_end) << 16) |
+               ((mode->htotal - mode->hsync_end) <<  0);
+       tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
+
+       value = (mode->vdisplay << 16) | mode->hdisplay;
+       tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
+
+       return 0;
+}
+
+static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
+                               struct drm_display_mode *mode,
+                               unsigned long *div)
+{
+       unsigned long pclk = mode->clock * 1000, rate;
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       struct tegra_output *output = NULL;
+       struct drm_encoder *encoder;
+       long err;
+
+       list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
+               if (encoder->crtc == crtc) {
+                       output = encoder_to_output(encoder);
+                       break;
+               }
+
+       if (!output)
+               return -ENODEV;
+
+       /*
+        * This assumes that the display controller will divide its parent
+        * clock by 2 to generate the pixel clock.
+        */
+       err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
+       if (err < 0) {
+               dev_err(dc->dev, "failed to setup clock: %ld\n", err);
+               return err;
+       }
+
+       rate = clk_get_rate(dc->clk);
+       *div = (rate * 2 / pclk) - 2;
+
+       DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
+
+       return 0;
+}
+
+static int tegra_crtc_mode_set(struct drm_crtc *crtc,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adjusted,
+                              int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       unsigned int h_dda, v_dda, bpp;
+       struct tegra_dc_window win;
+       unsigned long div, value;
+       int err;
+
+       err = tegra_crtc_setup_clk(crtc, mode, &div);
+       if (err) {
+               dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
+               return err;
+       }
+
+       /* program display mode */
+       tegra_dc_set_timings(dc, mode);
+
+       value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
+       tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
+
+       value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
+       value &= ~LVS_OUTPUT_POLARITY_LOW;
+       value &= ~LHS_OUTPUT_POLARITY_LOW;
+       tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
+
+       value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
+               DISP_ORDER_RED_BLUE;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
+
+       tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
+
+       value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
+       tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
+
+       /* setup window parameters */
+       memset(&win, 0, sizeof(win));
+       win.x.full = dfixed_const(0);
+       win.y.full = dfixed_const(0);
+       win.w.full = dfixed_const(mode->hdisplay);
+       win.h.full = dfixed_const(mode->vdisplay);
+       win.outx = 0;
+       win.outy = 0;
+       win.outw = mode->hdisplay;
+       win.outh = mode->vdisplay;
+
+       switch (crtc->fb->pixel_format) {
+       case DRM_FORMAT_XRGB8888:
+               win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+               break;
+
+       case DRM_FORMAT_RGB565:
+               win.fmt = WIN_COLOR_DEPTH_B5G6R5;
+               break;
+
+       default:
+               win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
+               WARN_ON(1);
+               break;
+       }
+
+       bpp = crtc->fb->bits_per_pixel / 8;
+       win.stride = crtc->fb->pitches[0];
+
+       /* program window registers */
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
+       value |= WINDOW_A_SELECT;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
+
+       tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
+       tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
+
+       value = V_POSITION(win.outy) | H_POSITION(win.outx);
+       tegra_dc_writel(dc, value, DC_WIN_POSITION);
+
+       value = V_SIZE(win.outh) | H_SIZE(win.outw);
+       tegra_dc_writel(dc, value, DC_WIN_SIZE);
+
+       value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
+               H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
+       tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
+
+       h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
+       v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
+
+       value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
+       tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
+
+       h_dda = compute_initial_dda(win.x);
+       v_dda = compute_initial_dda(win.y);
+
+       tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
+       tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
+
+       tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
+       tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
+
+       tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
+       tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
+       tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
+                       DC_WINBUF_ADDR_H_OFFSET);
+       tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
+
+       value = WIN_ENABLE;
+
+       if (bpp < 24)
+               value |= COLOR_EXPAND;
+
+       tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+       tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
+       tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
+
+       return 0;
+}
+
+static void tegra_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       unsigned int syncpt;
+       unsigned long value;
+
+       /* hardware initialization */
+       tegra_periph_reset_deassert(dc->clk);
+       usleep_range(10000, 20000);
+
+       if (dc->pipe)
+               syncpt = SYNCPT_VBLANK1;
+       else
+               syncpt = SYNCPT_VBLANK0;
+
+       /* initialize display controller */
+       tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+       tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+       value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+               PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
+       value |= DISP_CTRL_MODE_C_DISPLAY;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+       /* initialize timer */
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+}
+
+static void tegra_crtc_commit(struct drm_crtc *crtc)
+{
+       struct tegra_dc *dc = to_tegra_dc(crtc);
+       unsigned long update_mask;
+       unsigned long value;
+
+       update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
+
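+       /*
+        * Writing the UPDATE bits (the ACT_REQ bits shifted left by 8,
+        * see DC_CMD_STATE_CONTROL) latches the programmed values; the
+        * ACT_REQ write below then activates them at the next frame.
+        */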
+       tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
+
+       value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
+       value |= FRAME_END_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+       value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
+       value |= FRAME_END_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+}
+
+static void tegra_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
+       .dpms = tegra_crtc_dpms,
+       .mode_fixup = tegra_crtc_mode_fixup,
+       .mode_set = tegra_crtc_mode_set,
+       .prepare = tegra_crtc_prepare,
+       .commit = tegra_crtc_commit,
+       .load_lut = tegra_crtc_load_lut,
+};
+
+static irqreturn_t tegra_drm_irq(int irq, void *data)
+{
+       struct tegra_dc *dc = data;
+       unsigned long status;
+
+       status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
+       tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
+
+       if (status & FRAME_END_INT) {
+               /*
+               dev_dbg(dc->dev, "%s(): frame end\n", __func__);
+               */
+       }
+
+       if (status & VBLANK_INT) {
+               /*
+               dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
+               */
+               drm_handle_vblank(dc->base.dev, dc->pipe);
+       }
+
+       if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
+               /*
+               dev_dbg(dc->dev, "%s(): underflow\n", __func__);
+               */
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int tegra_dc_show_regs(struct seq_file *s, void *data)
+{
+       struct drm_info_node *node = s->private;
+       struct tegra_dc *dc = node->info_ent->data;
+
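+/* dump one register as "<name> <word offset> <current value>" */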
+#define DUMP_REG(name)                                         \
+       seq_printf(s, "%-40s %#05x %08lx\n", #name, name,       \
+                  tegra_dc_readl(dc, name))
+
+       DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
+       DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+       DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
+       DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
+       DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
+       DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
+       DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
+       DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
+       DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
+       DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
+       DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
+       DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
+       DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
+       DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
+       DUMP_REG(DC_CMD_DISPLAY_COMMAND);
+       DUMP_REG(DC_CMD_SIGNAL_RAISE);
+       DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
+       DUMP_REG(DC_CMD_INT_STATUS);
+       DUMP_REG(DC_CMD_INT_MASK);
+       DUMP_REG(DC_CMD_INT_ENABLE);
+       DUMP_REG(DC_CMD_INT_TYPE);
+       DUMP_REG(DC_CMD_INT_POLARITY);
+       DUMP_REG(DC_CMD_SIGNAL_RAISE1);
+       DUMP_REG(DC_CMD_SIGNAL_RAISE2);
+       DUMP_REG(DC_CMD_SIGNAL_RAISE3);
+       DUMP_REG(DC_CMD_STATE_ACCESS);
+       DUMP_REG(DC_CMD_STATE_CONTROL);
+       DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
+       DUMP_REG(DC_CMD_REG_ACT_CONTROL);
+       DUMP_REG(DC_COM_CRC_CONTROL);
+       DUMP_REG(DC_COM_CRC_CHECKSUM);
+       DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
+       DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
+       DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
+       DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
+       DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
+       DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
+       DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
+       DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
+       DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
+       DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
+       DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
+       DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
+       DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
+       DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
+       DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
+       DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
+       DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
+       DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
+       DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
+       DUMP_REG(DC_COM_PIN_MISC_CONTROL);
+       DUMP_REG(DC_COM_PIN_PM0_CONTROL);
+       DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
+       DUMP_REG(DC_COM_PIN_PM1_CONTROL);
+       DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
+       DUMP_REG(DC_COM_SPI_CONTROL);
+       DUMP_REG(DC_COM_SPI_START_BYTE);
+       DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
+       DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
+       DUMP_REG(DC_COM_HSPI_CS_DC);
+       DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
+       DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
+       DUMP_REG(DC_COM_GPIO_CTRL);
+       DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
+       DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
+       DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
+       DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
+       DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
+       DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
+       DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+       DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
+       DUMP_REG(DC_DISP_REF_TO_SYNC);
+       DUMP_REG(DC_DISP_SYNC_WIDTH);
+       DUMP_REG(DC_DISP_BACK_PORCH);
+       DUMP_REG(DC_DISP_ACTIVE);
+       DUMP_REG(DC_DISP_FRONT_PORCH);
+       DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
+       DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
+       DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
+       DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
+       DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
+       DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
+       DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
+       DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
+       DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
+       DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
+       DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
+       DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
+       DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
+       DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
+       DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
+       DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
+       DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
+       DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
+       DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
+       DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
+       DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
+       DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
+       DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
+       DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
+       DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
+       DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
+       DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
+       DUMP_REG(DC_DISP_M0_CONTROL);
+       DUMP_REG(DC_DISP_M1_CONTROL);
+       DUMP_REG(DC_DISP_DI_CONTROL);
+       DUMP_REG(DC_DISP_PP_CONTROL);
+       DUMP_REG(DC_DISP_PP_SELECT_A);
+       DUMP_REG(DC_DISP_PP_SELECT_B);
+       DUMP_REG(DC_DISP_PP_SELECT_C);
+       DUMP_REG(DC_DISP_PP_SELECT_D);
+       DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
+       DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
+       DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
+       DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
+       DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
+       DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
+       DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
+       DUMP_REG(DC_DISP_BORDER_COLOR);
+       DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
+       DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
+       DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
+       DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
+       DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
+       DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
+       DUMP_REG(DC_DISP_CURSOR_START_ADDR);
+       DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
+       DUMP_REG(DC_DISP_CURSOR_POSITION);
+       DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
+       DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
+       DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
+       DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
+       DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
+       DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
+       DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
+       DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
+       DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
+       DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
+       DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
+       DUMP_REG(DC_DISP_DAC_CRT_CTRL);
+       DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
+       DUMP_REG(DC_DISP_SD_CONTROL);
+       DUMP_REG(DC_DISP_SD_CSC_COEFF);
+       DUMP_REG(DC_DISP_SD_LUT(0));
+       DUMP_REG(DC_DISP_SD_LUT(1));
+       DUMP_REG(DC_DISP_SD_LUT(2));
+       DUMP_REG(DC_DISP_SD_LUT(3));
+       DUMP_REG(DC_DISP_SD_LUT(4));
+       DUMP_REG(DC_DISP_SD_LUT(5));
+       DUMP_REG(DC_DISP_SD_LUT(6));
+       DUMP_REG(DC_DISP_SD_LUT(7));
+       DUMP_REG(DC_DISP_SD_LUT(8));
+       DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
+       DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
+       DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
+       DUMP_REG(DC_DISP_SD_BL_TF(0));
+       DUMP_REG(DC_DISP_SD_BL_TF(1));
+       DUMP_REG(DC_DISP_SD_BL_TF(2));
+       DUMP_REG(DC_DISP_SD_BL_TF(3));
+       DUMP_REG(DC_DISP_SD_BL_CONTROL);
+       DUMP_REG(DC_DISP_SD_HW_K_VALUES);
+       DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
+       DUMP_REG(DC_WIN_WIN_OPTIONS);
+       DUMP_REG(DC_WIN_BYTE_SWAP);
+       DUMP_REG(DC_WIN_BUFFER_CONTROL);
+       DUMP_REG(DC_WIN_COLOR_DEPTH);
+       DUMP_REG(DC_WIN_POSITION);
+       DUMP_REG(DC_WIN_SIZE);
+       DUMP_REG(DC_WIN_PRESCALED_SIZE);
+       DUMP_REG(DC_WIN_H_INITIAL_DDA);
+       DUMP_REG(DC_WIN_V_INITIAL_DDA);
+       DUMP_REG(DC_WIN_DDA_INC);
+       DUMP_REG(DC_WIN_LINE_STRIDE);
+       DUMP_REG(DC_WIN_BUF_STRIDE);
+       DUMP_REG(DC_WIN_UV_BUF_STRIDE);
+       DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
+       DUMP_REG(DC_WIN_DV_CONTROL);
+       DUMP_REG(DC_WIN_BLEND_NOKEY);
+       DUMP_REG(DC_WIN_BLEND_1WIN);
+       DUMP_REG(DC_WIN_BLEND_2WIN_X);
+       DUMP_REG(DC_WIN_BLEND_2WIN_Y);
+       DUMP_REG(DC_WIN_BLEND_3WIN_XY);
+       DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
+       DUMP_REG(DC_WINBUF_START_ADDR);
+       DUMP_REG(DC_WINBUF_START_ADDR_NS);
+       DUMP_REG(DC_WINBUF_START_ADDR_U);
+       DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
+       DUMP_REG(DC_WINBUF_START_ADDR_V);
+       DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
+       DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
+       DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
+       DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
+       DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
+       DUMP_REG(DC_WINBUF_UFLOW_STATUS);
+       DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
+       DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
+       DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
+
+#undef DUMP_REG
+
+       return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+       { "regs", tegra_dc_show_regs, 0, NULL },
+};
+
+static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
+{
+       unsigned int i;
+       char *name;
+       int err;
+
+       name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
+       if (!name)
+               return -ENOMEM;
+
+       dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+       kfree(name);
+
+       if (!dc->debugfs)
+               return -ENOMEM;
+
+       dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+                                   GFP_KERNEL);
+       if (!dc->debugfs_files) {
+               err = -ENOMEM;
+               goto remove;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+               dc->debugfs_files[i].data = dc;
+
+       err = drm_debugfs_create_files(dc->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+                                      dc->debugfs, minor);
+       if (err < 0)
+               goto free;
+
+       dc->minor = minor;
+
+       return 0;
+
+free:
+       kfree(dc->debugfs_files);
+       dc->debugfs_files = NULL;
+remove:
+       debugfs_remove(dc->debugfs);
+       dc->debugfs = NULL;
+
+       return err;
+}
+
+static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
+{
+       drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
+                                dc->minor);
+       dc->minor = NULL;
+
+       kfree(dc->debugfs_files);
+       dc->debugfs_files = NULL;
+
+       debugfs_remove(dc->debugfs);
+       dc->debugfs = NULL;
+
+       return 0;
+}
+
+static int tegra_dc_drm_init(struct host1x_client *client,
+                            struct drm_device *drm)
+{
+       struct tegra_dc *dc = host1x_client_to_dc(client);
+       int err;
+
+       dc->pipe = drm->mode_config.num_crtc;
+
+       drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+       drm_mode_crtc_set_gamma_size(&dc->base, 256);
+       drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+
+       err = tegra_dc_rgb_init(drm, dc);
+       if (err < 0 && err != -ENODEV) {
+               dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
+               return err;
+       }
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dc_debugfs_init(dc, drm->primary);
+               if (err < 0)
+                       dev_err(dc->dev, "debugfs setup failed: %d\n", err);
+       }
+
+       err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
+                              dev_name(dc->dev), dc);
+       if (err < 0) {
+               dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
+                       err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int tegra_dc_drm_exit(struct host1x_client *client)
+{
+       struct tegra_dc *dc = host1x_client_to_dc(client);
+       int err;
+
+       devm_free_irq(dc->dev, dc->irq, dc);
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dc_debugfs_exit(dc);
+               if (err < 0)
+                       dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
+       }
+
+       err = tegra_dc_rgb_exit(dc);
+       if (err) {
+               dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct host1x_client_ops dc_client_ops = {
+       .drm_init = tegra_dc_drm_init,
+       .drm_exit = tegra_dc_drm_exit,
+};
+
+static int tegra_dc_probe(struct platform_device *pdev)
+{
+       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct resource *regs;
+       struct tegra_dc *dc;
+       int err;
+
+       dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
+       if (!dc)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&dc->list);
+       dc->dev = &pdev->dev;
+
+       dc->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dc->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               return PTR_ERR(dc->clk);
+       }
+
+       err = clk_prepare_enable(dc->clk);
+       if (err < 0)
+               return err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs) {
+               dev_err(&pdev->dev, "failed to get registers\n");
+               err = -ENXIO;
+               goto disable_clk;
+       }
+
+       dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
+       if (!dc->regs) {
+               dev_err(&pdev->dev, "failed to remap registers\n");
+               err = -ENXIO;
+               goto disable_clk;
+       }
+
+       dc->irq = platform_get_irq(pdev, 0);
+       if (dc->irq < 0) {
+               dev_err(&pdev->dev, "failed to get IRQ\n");
+               err = dc->irq;
+               goto disable_clk;
+       }
+
+       INIT_LIST_HEAD(&dc->client.list);
+       dc->client.ops = &dc_client_ops;
+       dc->client.dev = &pdev->dev;
+
+       err = tegra_dc_rgb_probe(dc);
+       if (err < 0 && err != -ENODEV) {
+               dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+               goto disable_clk;
+       }
+
+       err = host1x_register_client(host1x, &dc->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto disable_clk;
+       }
+
+       platform_set_drvdata(pdev, dc);
+
+       return 0;
+
+disable_clk:
+       /* the clock was enabled above; do not leak it on failure */
+       clk_disable_unprepare(dc->clk);
+       return err;
+}
+
+static int tegra_dc_remove(struct platform_device *pdev)
+{
+       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct tegra_dc *dc = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_unregister_client(host1x, &dc->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       clk_disable_unprepare(dc->clk);
+
+       return 0;
+}
+
+static const struct of_device_id tegra_dc_of_match[] = {
+       { .compatible = "nvidia,tegra30-dc", },
+       { .compatible = "nvidia,tegra20-dc", },
+       { },
+};
+
+struct platform_driver tegra_dc_driver = {
+       .driver = {
+               .name = "tegra-dc",
+               .owner = THIS_MODULE,
+               .of_match_table = tegra_dc_of_match,
+       },
+       .probe = tegra_dc_probe,
+       .remove = tegra_dc_remove,
+};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644 (file)
index 0000000..99977b5
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DC_H
+#define TEGRA_DC_H 1
+
+#define DC_CMD_GENERAL_INCR_SYNCPT             0x000
+#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL       0x001
+#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR       0x002
+#define DC_CMD_WIN_A_INCR_SYNCPT               0x008
+#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL         0x009
+#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR         0x00a
+#define DC_CMD_WIN_B_INCR_SYNCPT               0x010
+#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL         0x011
+#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR         0x012
+#define DC_CMD_WIN_C_INCR_SYNCPT               0x018
+#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL         0x019
+#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR         0x01a
+#define DC_CMD_CONT_SYNCPT_VSYNC               0x028
+#define DC_CMD_DISPLAY_COMMAND_OPTION0         0x031
+#define DC_CMD_DISPLAY_COMMAND                 0x032
+#define DISP_CTRL_MODE_STOP (0 << 5)
+#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
+#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
+#define DC_CMD_SIGNAL_RAISE                    0x033
+#define DC_CMD_DISPLAY_POWER_CONTROL           0x036
+#define PW0_ENABLE (1 <<  0)
+#define PW1_ENABLE (1 <<  2)
+#define PW2_ENABLE (1 <<  4)
+#define PW3_ENABLE (1 <<  6)
+#define PW4_ENABLE (1 <<  8)
+#define PM0_ENABLE (1 << 16)
+#define PM1_ENABLE (1 << 18)
+
+#define DC_CMD_INT_STATUS                      0x037
+#define DC_CMD_INT_MASK                                0x038
+#define DC_CMD_INT_ENABLE                      0x039
+#define DC_CMD_INT_TYPE                                0x03a
+#define DC_CMD_INT_POLARITY                    0x03b
+#define CTXSW_INT     (1 << 0)
+#define FRAME_END_INT (1 << 1)
+#define VBLANK_INT    (1 << 2)
+#define WIN_A_UF_INT  (1 << 8)
+#define WIN_B_UF_INT  (1 << 9)
+#define WIN_C_UF_INT  (1 << 10)
+#define WIN_A_OF_INT  (1 << 14)
+#define WIN_B_OF_INT  (1 << 15)
+#define WIN_C_OF_INT  (1 << 16)
+
+#define DC_CMD_SIGNAL_RAISE1                   0x03c
+#define DC_CMD_SIGNAL_RAISE2                   0x03d
+#define DC_CMD_SIGNAL_RAISE3                   0x03e
+
+#define DC_CMD_STATE_ACCESS                    0x040
+
+#define DC_CMD_STATE_CONTROL                   0x041
+#define GENERAL_ACT_REQ (1 <<  0)
+#define WIN_A_ACT_REQ   (1 <<  1)
+#define WIN_B_ACT_REQ   (1 <<  2)
+#define WIN_C_ACT_REQ   (1 <<  3)
+#define GENERAL_UPDATE  (1 <<  8)
+#define WIN_A_UPDATE    (1 <<  9)
+#define WIN_B_UPDATE    (1 << 10)
+#define WIN_C_UPDATE    (1 << 11)
+#define NC_HOST_TRIG    (1 << 24)
+
+#define DC_CMD_DISPLAY_WINDOW_HEADER           0x042
+#define WINDOW_A_SELECT (1 << 4)
+#define WINDOW_B_SELECT (1 << 5)
+#define WINDOW_C_SELECT (1 << 6)
+
+#define DC_CMD_REG_ACT_CONTROL                 0x043
+
+#define DC_COM_CRC_CONTROL                     0x300
+#define DC_COM_CRC_CHECKSUM                    0x301
+#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
+#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
+#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
+#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
+#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
+#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
+#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
+#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
+
+#define DC_COM_PIN_MISC_CONTROL                        0x31b
+#define DC_COM_PIN_PM0_CONTROL                 0x31c
+#define DC_COM_PIN_PM0_DUTY_CYCLE              0x31d
+#define DC_COM_PIN_PM1_CONTROL                 0x31e
+#define DC_COM_PIN_PM1_DUTY_CYCLE              0x31f
+
+#define DC_COM_SPI_CONTROL                     0x320
+#define DC_COM_SPI_START_BYTE                  0x321
+#define DC_COM_HSPI_WRITE_DATA_AB              0x322
+#define DC_COM_HSPI_WRITE_DATA_CD              0x323
+#define DC_COM_HSPI_CS_DC                      0x324
+#define DC_COM_SCRATCH_REGISTER_A              0x325
+#define DC_COM_SCRATCH_REGISTER_B              0x326
+#define DC_COM_GPIO_CTRL                       0x327
+#define DC_COM_GPIO_DEBOUNCE_COUNTER           0x328
+#define DC_COM_CRC_CHECKSUM_LATCHED            0x329
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS0           0x400
+#define H_PULSE_0_ENABLE (1 <<  8)
+#define H_PULSE_1_ENABLE (1 << 10)
+#define H_PULSE_2_ENABLE (1 << 12)
+
+#define DC_DISP_DISP_SIGNAL_OPTIONS1           0x401
+
+#define DC_DISP_DISP_WIN_OPTIONS               0x402
+#define HDMI_ENABLE (1 << 30)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY         0x403
+#define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
+#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
+#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) <<  8)
+#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) <<  0)
+
+#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER   0x404
+#define CURSOR_DELAY(x)   (((x) & 0x3f) << 24)
+#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
+#define WINDOW_B_DELAY(x) (((x) & 0x3f) <<  8)
+#define WINDOW_C_DELAY(x) (((x) & 0x3f) <<  0)
+
+#define DC_DISP_DISP_TIMING_OPTIONS            0x405
+#define VSYNC_H_POSITION(x) ((x) & 0xfff)
+
+#define DC_DISP_REF_TO_SYNC                    0x406
+#define DC_DISP_SYNC_WIDTH                     0x407
+#define DC_DISP_BACK_PORCH                     0x408
+#define DC_DISP_ACTIVE                         0x409
+#define DC_DISP_FRONT_PORCH                    0x40a
+#define DC_DISP_H_PULSE0_CONTROL               0x40b
+#define DC_DISP_H_PULSE0_POSITION_A            0x40c
+#define DC_DISP_H_PULSE0_POSITION_B            0x40d
+#define DC_DISP_H_PULSE0_POSITION_C            0x40e
+#define DC_DISP_H_PULSE0_POSITION_D            0x40f
+#define DC_DISP_H_PULSE1_CONTROL               0x410
+#define DC_DISP_H_PULSE1_POSITION_A            0x411
+#define DC_DISP_H_PULSE1_POSITION_B            0x412
+#define DC_DISP_H_PULSE1_POSITION_C            0x413
+#define DC_DISP_H_PULSE1_POSITION_D            0x414
+#define DC_DISP_H_PULSE2_CONTROL               0x415
+#define DC_DISP_H_PULSE2_POSITION_A            0x416
+#define DC_DISP_H_PULSE2_POSITION_B            0x417
+#define DC_DISP_H_PULSE2_POSITION_C            0x418
+#define DC_DISP_H_PULSE2_POSITION_D            0x419
+#define DC_DISP_V_PULSE0_CONTROL               0x41a
+#define DC_DISP_V_PULSE0_POSITION_A            0x41b
+#define DC_DISP_V_PULSE0_POSITION_B            0x41c
+#define DC_DISP_V_PULSE0_POSITION_C            0x41d
+#define DC_DISP_V_PULSE1_CONTROL               0x41e
+#define DC_DISP_V_PULSE1_POSITION_A            0x41f
+#define DC_DISP_V_PULSE1_POSITION_B            0x420
+#define DC_DISP_V_PULSE1_POSITION_C            0x421
+#define DC_DISP_V_PULSE2_CONTROL               0x422
+#define DC_DISP_V_PULSE2_POSITION_A            0x423
+#define DC_DISP_V_PULSE3_CONTROL               0x424
+#define DC_DISP_V_PULSE3_POSITION_A            0x425
+#define DC_DISP_M0_CONTROL                     0x426
+#define DC_DISP_M1_CONTROL                     0x427
+#define DC_DISP_DI_CONTROL                     0x428
+#define DC_DISP_PP_CONTROL                     0x429
+#define DC_DISP_PP_SELECT_A                    0x42a
+#define DC_DISP_PP_SELECT_B                    0x42b
+#define DC_DISP_PP_SELECT_C                    0x42c
+#define DC_DISP_PP_SELECT_D                    0x42d
+
+#define PULSE_MODE_NORMAL    (0 << 3)
+#define PULSE_MODE_ONE_CLOCK (1 << 3)
+#define PULSE_POLARITY_HIGH  (0 << 4)
+#define PULSE_POLARITY_LOW   (1 << 4)
+#define PULSE_QUAL_ALWAYS    (0 << 6)
+#define PULSE_QUAL_VACTIVE   (2 << 6)
+#define PULSE_QUAL_VACTIVE1  (3 << 6)
+#define PULSE_LAST_START_A   (0 << 8)
+#define PULSE_LAST_END_A     (1 << 8)
+#define PULSE_LAST_START_B   (2 << 8)
+#define PULSE_LAST_END_B     (3 << 8)
+#define PULSE_LAST_START_C   (4 << 8)
+#define PULSE_LAST_END_C     (5 << 8)
+#define PULSE_LAST_START_D   (6 << 8)
+#define PULSE_LAST_END_D     (7 << 8)
+
+#define PULSE_START(x) (((x) & 0xfff) <<  0)
+#define PULSE_END(x)   (((x) & 0xfff) << 16)
+
+#define DC_DISP_DISP_CLOCK_CONTROL             0x42e
+#define PIXEL_CLK_DIVIDER_PCD1  (0 << 8)
+#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
+#define PIXEL_CLK_DIVIDER_PCD2  (2 << 8)
+#define PIXEL_CLK_DIVIDER_PCD3  (3 << 8)
+#define PIXEL_CLK_DIVIDER_PCD4  (4 << 8)
+#define PIXEL_CLK_DIVIDER_PCD6  (5 << 8)
+#define PIXEL_CLK_DIVIDER_PCD8  (6 << 8)
+#define PIXEL_CLK_DIVIDER_PCD9  (7 << 8)
+#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
+#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
+#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
+#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
+#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
+#define SHIFT_CLK_DIVIDER(x)    ((x) & 0xff)
+
+#define DC_DISP_DISP_INTERFACE_CONTROL         0x42f
+#define DISP_DATA_FORMAT_DF1P1C    (0 << 0)
+#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
+#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
+#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
+#define DISP_DATA_FORMAT_DF2S      (4 << 0)
+#define DISP_DATA_FORMAT_DF3S      (5 << 0)
+#define DISP_DATA_FORMAT_DFSPI     (6 << 0)
+#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
+#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
+#define DISP_ALIGNMENT_MSB         (0 << 8)
+#define DISP_ALIGNMENT_LSB         (1 << 8)
+#define DISP_ORDER_RED_BLUE        (0 << 9)
+#define DISP_ORDER_BLUE_RED        (1 << 9)
+
+#define DC_DISP_DISP_COLOR_CONTROL             0x430
+#define BASE_COLOR_SIZE666     (0 << 0)
+#define BASE_COLOR_SIZE111     (1 << 0)
+#define BASE_COLOR_SIZE222     (2 << 0)
+#define BASE_COLOR_SIZE333     (3 << 0)
+#define BASE_COLOR_SIZE444     (4 << 0)
+#define BASE_COLOR_SIZE555     (5 << 0)
+#define BASE_COLOR_SIZE565     (6 << 0)
+#define BASE_COLOR_SIZE332     (7 << 0)
+#define BASE_COLOR_SIZE888     (8 << 0)
+#define DITHER_CONTROL_DISABLE (0 << 8)
+#define DITHER_CONTROL_ORDERED (2 << 8)
+#define DITHER_CONTROL_ERRDIFF (3 << 8)
+
+#define DC_DISP_SHIFT_CLOCK_OPTIONS            0x431
+
+#define DC_DISP_DATA_ENABLE_OPTIONS            0x432
+#define DE_SELECT_ACTIVE_BLANK  (0 << 0)
+#define DE_SELECT_ACTIVE        (1 << 0)
+#define DE_SELECT_ACTIVE_IS     (2 << 0)
+#define DE_CONTROL_ONECLK       (0 << 2)
+#define DE_CONTROL_NORMAL       (1 << 2)
+#define DE_CONTROL_EARLY_EXT    (2 << 2)
+#define DE_CONTROL_EARLY        (3 << 2)
+#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
+
+#define DC_DISP_SERIAL_INTERFACE_OPTIONS       0x433
+#define DC_DISP_LCD_SPI_OPTIONS                        0x434
+#define DC_DISP_BORDER_COLOR                   0x435
+#define DC_DISP_COLOR_KEY0_LOWER               0x436
+#define DC_DISP_COLOR_KEY0_UPPER               0x437
+#define DC_DISP_COLOR_KEY1_LOWER               0x438
+#define DC_DISP_COLOR_KEY1_UPPER               0x439
+
+#define DC_DISP_CURSOR_FOREGROUND              0x43c
+#define DC_DISP_CURSOR_BACKGROUND              0x43d
+
+#define DC_DISP_CURSOR_START_ADDR              0x43e
+#define DC_DISP_CURSOR_START_ADDR_NS           0x43f
+
+#define DC_DISP_CURSOR_POSITION                        0x440
+#define DC_DISP_CURSOR_POSITION_NS             0x441
+
+#define DC_DISP_INIT_SEQ_CONTROL               0x442
+#define DC_DISP_SPI_INIT_SEQ_DATA_A            0x443
+#define DC_DISP_SPI_INIT_SEQ_DATA_B            0x444
+#define DC_DISP_SPI_INIT_SEQ_DATA_C            0x445
+#define DC_DISP_SPI_INIT_SEQ_DATA_D            0x446
+
+#define DC_DISP_DC_MCCIF_FIFOCTRL              0x480
+#define DC_DISP_MCCIF_DISPLAY0A_HYST           0x481
+#define DC_DISP_MCCIF_DISPLAY0B_HYST           0x482
+#define DC_DISP_MCCIF_DISPLAY1A_HYST           0x483
+#define DC_DISP_MCCIF_DISPLAY1B_HYST           0x484
+
+#define DC_DISP_DAC_CRT_CTRL                   0x4c0
+#define DC_DISP_DISP_MISC_CONTROL              0x4c1
+#define DC_DISP_SD_CONTROL                     0x4c2
+#define DC_DISP_SD_CSC_COEFF                   0x4c3
+#define DC_DISP_SD_LUT(x)                      (0x4c4 + (x))
+#define DC_DISP_SD_FLICKER_CONTROL             0x4cd
+#define DC_DISP_DC_PIXEL_COUNT                 0x4ce
+#define DC_DISP_SD_HISTOGRAM(x)                        (0x4cf + (x))
+#define DC_DISP_SD_BL_PARAMETERS               0x4d7
+#define DC_DISP_SD_BL_TF(x)                    (0x4d8 + (x))
+#define DC_DISP_SD_BL_CONTROL                  0x4dc
+#define DC_DISP_SD_HW_K_VALUES                 0x4dd
+#define DC_DISP_SD_MAN_K_VALUES                        0x4de
+
+#define DC_WIN_WIN_OPTIONS                     0x700
+#define COLOR_EXPAND (1 <<  6)
+#define WIN_ENABLE   (1 << 30)
+
+#define DC_WIN_BYTE_SWAP                       0x701
+#define BYTE_SWAP_NOSWAP  (0 << 0)
+#define BYTE_SWAP_SWAP2   (1 << 0)
+#define BYTE_SWAP_SWAP4   (2 << 0)
+#define BYTE_SWAP_SWAP4HW (3 << 0)
+
+#define DC_WIN_BUFFER_CONTROL                  0x702
+#define BUFFER_CONTROL_HOST  (0 << 0)
+#define BUFFER_CONTROL_VI    (1 << 0)
+#define BUFFER_CONTROL_EPP   (2 << 0)
+#define BUFFER_CONTROL_MPEGE (3 << 0)
+#define BUFFER_CONTROL_SB2D  (4 << 0)
+
+#define DC_WIN_COLOR_DEPTH                     0x703
+#define WIN_COLOR_DEPTH_P1              0
+#define WIN_COLOR_DEPTH_P2              1
+#define WIN_COLOR_DEPTH_P4              2
+#define WIN_COLOR_DEPTH_P8              3
+#define WIN_COLOR_DEPTH_B4G4R4A4        4
+#define WIN_COLOR_DEPTH_B5G5R5A         5
+#define WIN_COLOR_DEPTH_B5G6R5          6
+#define WIN_COLOR_DEPTH_AB5G5R5         7
+#define WIN_COLOR_DEPTH_B8G8R8A8       12
+#define WIN_COLOR_DEPTH_R8G8B8A8       13
+#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
+#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
+#define WIN_COLOR_DEPTH_YCbCr422       16
+#define WIN_COLOR_DEPTH_YUV422         17
+#define WIN_COLOR_DEPTH_YCbCr420P      18
+#define WIN_COLOR_DEPTH_YUV420P        19
+#define WIN_COLOR_DEPTH_YCbCr422P      20
+#define WIN_COLOR_DEPTH_YUV422P        21
+#define WIN_COLOR_DEPTH_YCbCr422R      22
+#define WIN_COLOR_DEPTH_YUV422R        23
+#define WIN_COLOR_DEPTH_YCbCr422RA     24
+#define WIN_COLOR_DEPTH_YUV422RA       25
+
+#define DC_WIN_POSITION                                0x704
+#define H_POSITION(x) (((x) & 0x1fff) <<  0)
+#define V_POSITION(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_SIZE                            0x705
+#define H_SIZE(x) (((x) & 0x1fff) <<  0)
+#define V_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_PRESCALED_SIZE                  0x706
+#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) <<  0)
+#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
+
+#define DC_WIN_H_INITIAL_DDA                   0x707
+#define DC_WIN_V_INITIAL_DDA                   0x708
+#define DC_WIN_DDA_INC                         0x709
+#define H_DDA_INC(x) (((x) & 0xffff) <<  0)
+#define V_DDA_INC(x) (((x) & 0xffff) << 16)
+
+#define DC_WIN_LINE_STRIDE                     0x70a
+#define DC_WIN_BUF_STRIDE                      0x70b
+#define DC_WIN_UV_BUF_STRIDE                   0x70c
+#define DC_WIN_BUFFER_ADDR_MODE                        0x70d
+#define DC_WIN_DV_CONTROL                      0x70e
+
+#define DC_WIN_BLEND_NOKEY                     0x70f
+#define DC_WIN_BLEND_1WIN                      0x710
+#define DC_WIN_BLEND_2WIN_X                    0x711
+#define DC_WIN_BLEND_2WIN_Y                    0x712
+#define DC_WIN_BLEND_3WIN_XY                   0x713
+
+#define DC_WIN_HP_FETCH_CONTROL                        0x714
+
+#define DC_WINBUF_START_ADDR                   0x800
+#define DC_WINBUF_START_ADDR_NS                        0x801
+#define DC_WINBUF_START_ADDR_U                 0x802
+#define DC_WINBUF_START_ADDR_U_NS              0x803
+#define DC_WINBUF_START_ADDR_V                 0x804
+#define DC_WINBUF_START_ADDR_V_NS              0x805
+
+#define DC_WINBUF_ADDR_H_OFFSET                        0x806
+#define DC_WINBUF_ADDR_H_OFFSET_NS             0x807
+#define DC_WINBUF_ADDR_V_OFFSET                        0x808
+#define DC_WINBUF_ADDR_V_OFFSET_NS             0x809
+
+#define DC_WINBUF_UFLOW_STATUS                 0x80a
+
+#define DC_WINBUF_AD_UFLOW_STATUS              0xbca
+#define DC_WINBUF_BD_UFLOW_STATUS              0xdca
+#define DC_WINBUF_CD_UFLOW_STATUS              0xfca
+
+/* synchronization points */
+#define SYNCPT_VBLANK0 26
+#define SYNCPT_VBLANK1 27
+
+#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644 (file)
index 0000000..3a503c9
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include <mach/clk.h>
+#include <linux/dma-mapping.h>
+#include <asm/dma-iommu.h>
+
+#include "drm.h"
+
+#define DRIVER_NAME "tegra"
+#define DRIVER_DESC "NVIDIA Tegra graphics"
+#define DRIVER_DATE "20120330"
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
+{
+       struct device *dev = drm->dev;
+       struct host1x *host1x;
+       int err;
+
+       host1x = dev_get_drvdata(dev);
+       drm->dev_private = host1x;
+       host1x->drm = drm;
+
+       drm_mode_config_init(drm);
+
+       err = host1x_drm_init(host1x, drm);
+       if (err < 0)
+               return err;
+
+       err = tegra_drm_fb_init(drm);
+       if (err < 0)
+               return err;
+
+       drm_kms_helper_poll_init(drm);
+
+       return 0;
+}
+
+static int tegra_drm_unload(struct drm_device *drm)
+{
+       drm_kms_helper_poll_fini(drm);
+       tegra_drm_fb_exit(drm);
+
+       drm_mode_config_cleanup(drm);
+
+       return 0;
+}
+
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
+{
+       return 0;
+}
+
+static void tegra_drm_lastclose(struct drm_device *drm)
+{
+       struct host1x *host1x = drm->dev_private;
+
+       drm_fbdev_cma_restore_mode(host1x->fbdev);
+}
+
+static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+};
+
+static const struct file_operations tegra_drm_fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+       .mmap = drm_gem_cma_mmap,
+       .poll = drm_poll,
+       .fasync = drm_fasync,
+       .read = drm_read,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .llseek = noop_llseek,
+};
+
+struct drm_driver tegra_drm_driver = {
+       .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
+       .load = tegra_drm_load,
+       .unload = tegra_drm_unload,
+       .open = tegra_drm_open,
+       .lastclose = tegra_drm_lastclose,
+
+       .gem_free_object = drm_gem_cma_free_object,
+       .gem_vm_ops = &drm_gem_cma_vm_ops,
+       .dumb_create = drm_gem_cma_dumb_create,
+       .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+       .dumb_destroy = drm_gem_cma_dumb_destroy,
+
+       .ioctls = tegra_drm_ioctls,
+       .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
+       .fops = &tegra_drm_fops,
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
+};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644 (file)
index 0000000..3a843a7
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_DRM_H
+#define TEGRA_DRM_H 1
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fixed.h>
+
+struct tegra_framebuffer {
+       struct drm_framebuffer base;
+       struct drm_gem_cma_object *obj;
+};
+
+static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
+{
+       return container_of(fb, struct tegra_framebuffer, base);
+}
+
+struct host1x {
+       struct drm_device *drm;
+       struct device *dev;
+       void __iomem *regs;
+       struct clk *clk;
+       int syncpt;
+       int irq;
+
+       struct mutex drm_clients_lock;
+       struct list_head drm_clients;
+       struct list_head drm_active;
+
+       struct mutex clients_lock;
+       struct list_head clients;
+
+       struct drm_fbdev_cma *fbdev;
+       struct tegra_framebuffer fb;
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+       int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
+       int (*drm_exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+       struct host1x *host1x;
+       struct device *dev;
+
+       const struct host1x_client_ops *ops;
+
+       struct list_head list;
+};
+
+extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
+extern int host1x_drm_exit(struct host1x *host1x);
+
+extern int host1x_register_client(struct host1x *host1x,
+                                 struct host1x_client *client);
+extern int host1x_unregister_client(struct host1x *host1x,
+                                   struct host1x_client *client);
+
+struct tegra_output;
+
+struct tegra_dc {
+       struct host1x_client client;
+
+       struct host1x *host1x;
+       struct device *dev;
+
+       struct drm_crtc base;
+       int pipe;
+
+       struct clk *clk;
+
+       void __iomem *regs;
+       int irq;
+
+       struct tegra_output *rgb;
+
+       struct list_head list;
+
+       struct drm_info_list *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+};
+
+static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+{
+       return container_of(client, struct tegra_dc, client);
+}
+
+static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct tegra_dc, base);
+}
+
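+/* display controller registers are word-addressed, hence the << 2 below */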
+static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
+                                  unsigned long reg)
+{
+       writel(value, dc->regs + (reg << 2));
+}
+
+static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
+                                          unsigned long reg)
+{
+       return readl(dc->regs + (reg << 2));
+}
+
+struct tegra_output_ops {
+       int (*enable)(struct tegra_output *output);
+       int (*disable)(struct tegra_output *output);
+       int (*setup_clock)(struct tegra_output *output, struct clk *clk,
+                          unsigned long pclk);
+       int (*check_mode)(struct tegra_output *output,
+                         struct drm_display_mode *mode,
+                         enum drm_mode_status *status);
+};
+
+enum tegra_output_type {
+       TEGRA_OUTPUT_RGB,
+       TEGRA_OUTPUT_HDMI,
+};
+
+struct tegra_output {
+       struct device_node *of_node;
+       struct device *dev;
+
+       const struct tegra_output_ops *ops;
+       enum tegra_output_type type;
+
+       struct i2c_adapter *ddc;
+       const struct edid *edid;
+       unsigned int hpd_irq;
+       int hpd_gpio;
+
+       struct drm_encoder encoder;
+       struct drm_connector connector;
+};
+
+static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
+{
+       return container_of(e, struct tegra_output, encoder);
+}
+
+static inline struct tegra_output *connector_to_output(struct drm_connector *c)
+{
+       return container_of(c, struct tegra_output, connector);
+}
+
+static inline int tegra_output_enable(struct tegra_output *output)
+{
+       if (output && output->ops && output->ops->enable)
+               return output->ops->enable(output);
+
+       return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_disable(struct tegra_output *output)
+{
+       if (output && output->ops && output->ops->disable)
+               return output->ops->disable(output);
+
+       return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_setup_clock(struct tegra_output *output,
+                                          struct clk *clk, unsigned long pclk)
+{
+       if (output && output->ops && output->ops->setup_clock)
+               return output->ops->setup_clock(output, clk, pclk);
+
+       return output ? -ENOSYS : -EINVAL;
+}
+
+static inline int tegra_output_check_mode(struct tegra_output *output,
+                                         struct drm_display_mode *mode,
+                                         enum drm_mode_status *status)
+{
+       if (output && output->ops && output->ops->check_mode)
+               return output->ops->check_mode(output, mode, status);
+
+       return output ? -ENOSYS : -EINVAL;
+}
+
+/* from rgb.c */
+extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
+extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
+
+/* from output.c */
+extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
+extern int tegra_output_exit(struct tegra_output *output);
+
+/* from gem.c */
+extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
+                                               size_t size);
+extern int tegra_gem_handle_create(struct drm_device *drm,
+                                  struct drm_file *file, size_t size,
+                                  unsigned long flags, uint32_t *handle);
+extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
+                                struct drm_mode_create_dumb *args);
+extern int tegra_gem_dumb_map_offset(struct drm_file *file,
+                                    struct drm_device *drm, uint32_t handle,
+                                    uint64_t *offset);
+extern int tegra_gem_dumb_destroy(struct drm_file *file,
+                                 struct drm_device *drm, uint32_t handle);
+extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+extern int tegra_gem_init_object(struct drm_gem_object *obj);
+extern void tegra_gem_free_object(struct drm_gem_object *obj);
+extern struct vm_operations_struct tegra_gem_vm_ops;
+
+/* from fb.c */
+extern int tegra_drm_fb_init(struct drm_device *drm);
+extern void tegra_drm_fb_exit(struct drm_device *drm);
+
+extern struct platform_driver tegra_host1x_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct drm_driver tegra_drm_driver;
+
+#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644 (file)
index 0000000..97993c6
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
+{
+       struct host1x *host1x = drm->dev_private;
+
+       drm_fbdev_cma_hotplug_event(host1x->fbdev);
+}
+
+static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+       .fb_create = drm_fb_cma_create,
+       .output_poll_changed = tegra_drm_fb_output_poll_changed,
+};
+
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+       struct host1x *host1x = drm->dev_private;
+       struct drm_fbdev_cma *fbdev;
+
+       drm->mode_config.min_width = 0;
+       drm->mode_config.min_height = 0;
+
+       drm->mode_config.max_width = 4096;
+       drm->mode_config.max_height = 4096;
+
+       drm->mode_config.funcs = &tegra_drm_mode_funcs;
+
+       fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+                                  drm->mode_config.num_connector);
+       if (IS_ERR(fbdev))
+               return PTR_ERR(fbdev);
+
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+       drm_fbdev_cma_restore_mode(fbdev);
+#endif
+
+       host1x->fbdev = fbdev;
+
+       return 0;
+}
+
+void tegra_drm_fb_exit(struct drm_device *drm)
+{
+       struct host1x *host1x = drm->dev_private;
+
+       drm_fbdev_cma_fini(host1x->fbdev);
+}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644 (file)
index 0000000..ab40164
--- /dev/null
@@ -0,0 +1,1334 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "hdmi.h"
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_hdmi {
+       struct host1x_client client;
+       struct tegra_output output;
+       struct device *dev;
+
+       struct regulator *vdd;
+       struct regulator *pll;
+
+       void __iomem *regs;
+       unsigned int irq;
+
+       struct clk *clk_parent;
+       struct clk *clk;
+
+       unsigned int audio_source;
+       unsigned int audio_freq;
+       bool stereo;
+       bool dvi;
+
+       struct drm_info_list *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+};
+
+static inline struct tegra_hdmi *
+host1x_client_to_hdmi(struct host1x_client *client)
+{
+       return container_of(client, struct tegra_hdmi, client);
+}
+
+static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
+{
+       return container_of(output, struct tegra_hdmi, output);
+}
+
+#define HDMI_AUDIOCLK_FREQ 216000000
+#define HDMI_REKEY_DEFAULT 56
+
+enum {
+       AUTO = 0,
+       SPDIF,
+       HDA,
+};
+
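+/* like the DC accessors, HDMI register offsets are in 32-bit words */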
+static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
+                                            unsigned long reg)
+{
+       return readl(hdmi->regs + (reg << 2));
+}
+
+static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
+                                    unsigned long reg)
+{
+       writel(val, hdmi->regs + (reg << 2));
+}
+
+struct tegra_hdmi_audio_config {
+       unsigned int pclk;
+       unsigned int n;
+       unsigned int cts;
+       unsigned int aval;
+};
+
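+/*
+ * Audio clock regeneration parameters: for every supported pixel clock the
+ * N/CTS pair satisfies the HDMI relation 128 * audio_freq = pclk * N / CTS.
+ */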
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
+       {  25200000, 4096,  25200, 24000 },
+       {  27000000, 4096,  27000, 24000 },
+       {  74250000, 4096,  74250, 24000 },
+       { 148500000, 4096, 148500, 24000 },
+       {         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
+       {  25200000, 5880,  26250, 25000 },
+       {  27000000, 5880,  28125, 25000 },
+       {  74250000, 4704,  61875, 20000 },
+       { 148500000, 4704, 123750, 20000 },
+       {         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
+       {  25200000, 6144,  25200, 24000 },
+       {  27000000, 6144,  27000, 24000 },
+       {  74250000, 6144,  74250, 24000 },
+       { 148500000, 6144, 148500, 24000 },
+       {         0,    0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
+       {  25200000, 11760,  26250, 25000 },
+       {  27000000, 11760,  28125, 25000 },
+       {  74250000,  9408,  61875, 20000 },
+       { 148500000,  9408, 123750, 20000 },
+       {         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
+       {  25200000, 12288,  25200, 24000 },
+       {  27000000, 12288,  27000, 24000 },
+       {  74250000, 12288,  74250, 24000 },
+       { 148500000, 12288, 148500, 24000 },
+       {         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
+       {  25200000, 23520,  26250, 25000 },
+       {  27000000, 23520,  28125, 25000 },
+       {  74250000, 18816,  61875, 20000 },
+       { 148500000, 18816, 123750, 20000 },
+       {         0,     0,      0,     0 },
+};
+
+static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
+       {  25200000, 24576,  25200, 24000 },
+       {  27000000, 24576,  27000, 24000 },
+       {  74250000, 24576,  74250, 24000 },
+       { 148500000, 24576, 148500, 24000 },
+       {         0,     0,      0,     0 },
+};
+
+struct tmds_config {
+       unsigned int pclk;
+       u32 pll0;
+       u32 pll1;
+       u32 pe_current;
+       u32 drive_current;
+};
+
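+/*
+ * Per-mode TMDS PLL and lane driver settings, ordered by ascending pixel
+ * clock; the UINT_MAX entry covers all modes above 74.25 MHz.
+ */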
+static const struct tmds_config tegra2_tmds_config[] = {
+       { /* 480p modes */
+               .pclk = 27000000,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+                       SOR_PLL_TX_REG_LOAD(3),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+               .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_0_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+       }, { /* 720p modes */
+               .pclk = 74250000,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+                       SOR_PLL_TX_REG_LOAD(3),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+               .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_6_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+       }, { /* 1080p modes */
+               .pclk = UINT_MAX,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+                       SOR_PLL_TX_REG_LOAD(3),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+               .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_6_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_6_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
+       },
+};
+
+static const struct tmds_config tegra3_tmds_config[] = {
+       { /* 480p modes */
+               .pclk = 27000000,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
+                       SOR_PLL_TX_REG_LOAD(0),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
+               .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_0_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_0_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+       }, { /* 720p modes */
+               .pclk = 74250000,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
+                       SOR_PLL_TX_REG_LOAD(0),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+               .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_5_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+       }, { /* 1080p modes */
+               .pclk = UINT_MAX,
+               .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
+                       SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
+                       SOR_PLL_TX_REG_LOAD(0),
+               .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
+               .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT1(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT2(PE_CURRENT_5_0_mA) |
+                       PE_CURRENT3(PE_CURRENT_5_0_mA),
+               .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
+                       DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
+       },
+};
+
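+/*
+ * The audio tables referenced below hold (pclk, n, cts, aval) tuples;
+ * per the HDMI specification the audio clock regeneration parameters
+ * must satisfy 128 * audio_freq = pclk * N / CTS, so an entry is only
+ * valid for the exact pixel clock it was computed for. Select the
+ * table by sample rate, then scan it for a matching pixel clock.
+ */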
+static const struct tegra_hdmi_audio_config *
+tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+{
+       const struct tegra_hdmi_audio_config *table;
+
+       switch (audio_freq) {
+       case 32000:
+               table = tegra_hdmi_audio_32k;
+               break;
+
+       case 44100:
+               table = tegra_hdmi_audio_44_1k;
+               break;
+
+       case 48000:
+               table = tegra_hdmi_audio_48k;
+               break;
+
+       case 88200:
+               table = tegra_hdmi_audio_88_2k;
+               break;
+
+       case 96000:
+               table = tegra_hdmi_audio_96k;
+               break;
+
+       case 176400:
+               table = tegra_hdmi_audio_176_4k;
+               break;
+
+       case 192000:
+               table = tegra_hdmi_audio_192k;
+               break;
+
+       default:
+               return NULL;
+       }
+
+       while (table->pclk) {
+               if (table->pclk == pclk)
+                       return table;
+
+               table++;
+       }
+
+       return NULL;
+}
+
+static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
+{
+       const unsigned int freqs[] = {
+               32000, 44100, 48000, 88200, 96000, 176400, 192000
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(freqs); i++) {
+               unsigned int f = freqs[i];
+               unsigned int eight_half;
+               unsigned long value;
+               unsigned int delta;
+
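+               /*
+                * Each AUDIO_FS register describes an acceptance window
+                * around the nominal ratio 8 * HDMI_AUDIOCLK_FREQ /
+                * (128 * fs); delta widens the window for lower sample
+                * rates.
+                */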
+               if (f > 96000)
+                       delta = 2;
+               else if (f > 48000)
+                       delta = 6;
+               else
+                       delta = 9;
+
+               eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
+               value = AUDIO_FS_LOW(eight_half - delta) |
+                       AUDIO_FS_HIGH(eight_half + delta);
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
+       }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+{
+       struct device_node *node = hdmi->dev->of_node;
+       const struct tegra_hdmi_audio_config *config;
+       unsigned int offset = 0;
+       unsigned long value;
+
+       switch (hdmi->audio_source) {
+       case HDA:
+               value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+               break;
+
+       case SPDIF:
+               value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+               break;
+
+       default:
+               value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+               break;
+       }
+
+       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+               value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+                        AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+       } else {
+               value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+
+               value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
+                       AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+       }
+
+       config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+       if (!config) {
+               dev_err(hdmi->dev, "no audio config for %u Hz at %u Hz pclk\n",
+                       hdmi->audio_freq, pclk);
+               return -EINVAL;
+       }
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
+
+       value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
+               AUDIO_N_VALUE(config->n - 1);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
+       tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
+                         HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+
+       value = ACR_SUBPACK_CTS(config->cts);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+
+       value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
+       value &= ~AUDIO_N_RESETF;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
+
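+       /*
+        * Tegra30 additionally takes a per-sample-rate AVAL value from
+        * the audio config; pick the SOR_AUDIO_AVAL register that
+        * matches the frequency selected above.
+        */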
+       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+               switch (hdmi->audio_freq) {
+               case 32000:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
+                       break;
+
+               case 44100:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
+                       break;
+
+               case 48000:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
+                       break;
+
+               case 88200:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
+                       break;
+
+               case 96000:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
+                       break;
+
+               case 176400:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
+                       break;
+
+               case 192000:
+                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
+                       break;
+               }
+
+               tegra_hdmi_writel(hdmi, config->aval, offset);
+       }
+
+       tegra_hdmi_setup_audio_fs_tables(hdmi);
+
+       return 0;
+}
+
+static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
+                                     unsigned int offset, u8 type,
+                                     u8 version, void *data, size_t size)
+{
+       unsigned long value;
+       u8 *ptr = data;
+       u32 subpack[2];
+       size_t i;
+       u8 csum;
+
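+       /*
+        * CEA-861 requires all infoframe bytes, header and payload
+        * including the checksum itself, to sum to zero modulo 256.
+        */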
+       /* first byte of data is the checksum */
+       csum = type + version + size - 1;
+
+       for (i = 1; i < size; i++)
+               csum += ptr[i];
+
+       ptr[0] = 0x100 - csum;
+
+       value = INFOFRAME_HEADER_TYPE(type) |
+               INFOFRAME_HEADER_VERSION(version) |
+               INFOFRAME_HEADER_LEN(size - 1);
+       tegra_hdmi_writel(hdmi, value, offset);
+
+       /* The audio infoframe only has one set of subpack registers.  The hdmi
+        * block pads the rest of the data as per the spec, so fix up the
+        * length before filling in the subpacks.
+        */
+       if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
+               size = 6;
+
+       /* each subpack is 7 bytes, divided into:
+        *   subpack_low - bytes 0 - 3
+        *   subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
+        */
+       for (i = 0; i < size; i++) {
+               size_t index = i % 7;
+
+               if (index == 0)
+                       memset(subpack, 0x0, sizeof(subpack));
+
+               ((u8 *)subpack)[index] = ptr[i];
+
+               if (index == 6 || (i + 1 == size)) {
+                       unsigned int reg = offset + 1 + (i / 7) * 2;
+
+                       tegra_hdmi_writel(hdmi, subpack[0], reg);
+                       tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
+               }
+       }
+}
+
+static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
+                                          struct drm_display_mode *mode)
+{
+       struct hdmi_avi_infoframe frame;
+       unsigned int h_front_porch;
+       unsigned int hsize = 16;
+       unsigned int vsize = 9;
+
+       if (hdmi->dvi) {
+               tegra_hdmi_writel(hdmi, 0,
+                                 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+               return;
+       }
+
+       h_front_porch = mode->hsync_start - mode->hdisplay;
+       memset(&frame, 0, sizeof(frame));
+       frame.r = HDMI_AVI_R_SAME;
+
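+       /*
+        * CEA-861 modes that share a resolution can be told apart by
+        * their horizontal front porch, e.g. 720p60 has a 110 pixel
+        * front porch while 720p50 has a 440 pixel one.
+        */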
+       switch (mode->vdisplay) {
+       case 480:
+               if (mode->hdisplay == 640) {
+                       frame.m = HDMI_AVI_M_4_3;
+                       frame.vic = 1;
+               } else {
+                       frame.m = HDMI_AVI_M_16_9;
+                       frame.vic = 3;
+               }
+               break;
+
+       case 576:
+               if (((hsize * 10) / vsize) > 14) {
+                       frame.m = HDMI_AVI_M_16_9;
+                       frame.vic = 18;
+               } else {
+                       frame.m = HDMI_AVI_M_4_3;
+                       frame.vic = 17;
+               }
+               break;
+
+       case 720:
+       case 1470: /* stereo mode */
+               frame.m = HDMI_AVI_M_16_9;
+
+               if (h_front_porch == 110)
+                       frame.vic = 4;
+               else
+                       frame.vic = 19;
+               break;
+
+       case 1080:
+       case 2205: /* stereo mode */
+               frame.m = HDMI_AVI_M_16_9;
+
+               switch (h_front_porch) {
+               case 88:
+                       frame.vic = 16;
+                       break;
+
+               case 528:
+                       frame.vic = 31;
+                       break;
+
+               default:
+                       frame.vic = 32;
+                       break;
+               }
+               break;
+
+       default:
+               frame.m = HDMI_AVI_M_16_9;
+               frame.vic = 0;
+               break;
+       }
+
+       tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
+                                 HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
+                                 &frame, sizeof(frame));
+
+       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+                         HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+       struct hdmi_audio_infoframe frame;
+
+       if (hdmi->dvi) {
+               tegra_hdmi_writel(hdmi, 0,
+                                 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+               return;
+       }
+
+       memset(&frame, 0, sizeof(frame));
+       frame.cc = HDMI_AUDIO_CC_2;
+
+       tegra_hdmi_write_infopack(hdmi,
+                                 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
+                                 HDMI_INFOFRAME_TYPE_AUDIO,
+                                 HDMI_AUDIO_VERSION,
+                                 &frame, sizeof(frame));
+
+       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
+                         HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+       struct hdmi_stereo_infoframe frame;
+       unsigned long value;
+
+       if (!hdmi->stereo) {
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+               value &= ~GENERIC_CTRL_ENABLE;
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+               return;
+       }
+
+       memset(&frame, 0, sizeof(frame));
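+       /* IEEE OUI of HDMI Licensing, LLC (0x000c03), LSB first */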
+       frame.regid0 = 0x03;
+       frame.regid1 = 0x0c;
+       frame.regid2 = 0x00;
+       frame.hdmi_video_format = 2;
+
+       /* TODO: 74 MHz limit? */
+       if (1) {
+               frame._3d_structure = 0;
+       } else {
+               frame._3d_structure = 8;
+               frame._3d_ext_data = 0;
+       }
+
+       tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
+                                 HDMI_INFOFRAME_TYPE_VENDOR,
+                                 HDMI_VENDOR_VERSION, &frame, 6);
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value |= GENERIC_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
+                                 const struct tmds_config *tmds)
+{
+       unsigned long value;
+
+       tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
+       tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
+       tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
+
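+       /* override the fused per-lane drive current defaults */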
+       value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+}
+
+static int tegra_output_hdmi_enable(struct tegra_output *output)
+{
+       unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct drm_display_mode *mode = &dc->base.mode;
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+       struct device_node *node = hdmi->dev->of_node;
+       unsigned int pulse_start, div82, pclk;
+       const struct tmds_config *tmds;
+       unsigned int num_tmds;
+       unsigned long value;
+       int retries = 1000;
+       int err;
+
+       pclk = mode->clock * 1000;
+       h_sync_width = mode->hsync_end - mode->hsync_start;
+       h_front_porch = mode->hsync_start - mode->hdisplay;
+       h_back_porch = mode->htotal - mode->hsync_end;
+
+       err = regulator_enable(hdmi->vdd);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
+               return err;
+       }
+
+       err = regulator_enable(hdmi->pll);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
+               return err;
+       }
+
+       /*
+        * This assumes that the display controller will divide its parent
+        * clock by 2 to generate the pixel clock.
+        */
+       err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
+               return err;
+       }
+
+       err = clk_set_rate(hdmi->clk, pclk);
+       if (err < 0)
+               return err;
+
+       err = clk_enable(hdmi->clk);
+       if (err < 0) {
+               dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
+               return err;
+       }
+
+       tegra_periph_reset_assert(hdmi->clk);
+       usleep_range(1000, 2000);
+       tegra_periph_reset_deassert(hdmi->clk);
+
+       tegra_dc_writel(dc, VSYNC_H_POSITION(1),
+                       DC_DISP_DISP_TIMING_OPTIONS);
+       tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
+                       DC_DISP_DISP_COLOR_CONTROL);
+
+       /* video_preamble uses h_pulse2 */
+       pulse_start = 1 + h_sync_width + h_back_porch - 10;
+
+       tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
+
+       value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
+               PULSE_LAST_END_A;
+       tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
+
+       value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
+       tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
+
+       value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
+               VSYNC_WINDOW_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+
+       if (dc->pipe)
+               value = HDMI_SRC_DISPLAYB;
+       else
+               value = HDMI_SRC_DISPLAYA;
+
+       if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
+                                       (mode->vdisplay == 576)))
+               tegra_hdmi_writel(hdmi,
+                                 value | ARM_VIDEO_RANGE_FULL,
+                                 HDMI_NV_PDISP_INPUT_CONTROL);
+       else
+               tegra_hdmi_writel(hdmi,
+                                 value | ARM_VIDEO_RANGE_LIMITED,
+                                 HDMI_NV_PDISP_INPUT_CONTROL);
+
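+       /*
+        * The SOR reference clock divider is programmed in quarter
+        * steps: div82 is the clock rate in MHz with two fractional
+        * bits, split into its integer and fractional parts below.
+        */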
+       div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
+       value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
+
+       if (!hdmi->dvi) {
+               err = tegra_hdmi_setup_audio(hdmi, pclk);
+               if (err < 0)
+                       hdmi->dvi = true;
+       }
+
+       if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
+               /*
+                * TODO: add ELD support
+                */
+       }
+
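+       /*
+        * Packets are 32 pixel clocks long; MAX_AC_PACKET is how many
+        * fit into the horizontal blanking interval once the rekey
+        * window and an 18 pixel margin are subtracted.
+        */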
+       rekey = HDMI_REKEY_DEFAULT;
+       value = HDMI_CTRL_REKEY(rekey);
+       value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
+                                         h_front_porch - rekey - 18) / 32);
+
+       if (!hdmi->dvi)
+               value |= HDMI_CTRL_ENABLE;
+
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
+
+       if (hdmi->dvi)
+               tegra_hdmi_writel(hdmi, 0x0,
+                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       else
+               tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
+                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+
+       tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+       tegra_hdmi_setup_audio_infoframe(hdmi);
+       tegra_hdmi_setup_stereo_infoframe(hdmi);
+
+       /* TMDS CONFIG */
+       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
+               num_tmds = ARRAY_SIZE(tegra3_tmds_config);
+               tmds = tegra3_tmds_config;
+       } else {
+               num_tmds = ARRAY_SIZE(tegra2_tmds_config);
+               tmds = tegra2_tmds_config;
+       }
+
+       for (i = 0; i < num_tmds; i++) {
+               if (pclk <= tmds[i].pclk) {
+                       tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+                       break;
+               }
+       }
+
+       tegra_hdmi_writel(hdmi,
+                         SOR_SEQ_CTL_PU_PC(0) |
+                         SOR_SEQ_PU_PC_ALT(0) |
+                         SOR_SEQ_PD_PC(8) |
+                         SOR_SEQ_PD_PC_ALT(8),
+                         HDMI_NV_PDISP_SOR_SEQ_CTL);
+
+       value = SOR_SEQ_INST_WAIT_TIME(1) |
+               SOR_SEQ_INST_WAIT_UNITS_VSYNC |
+               SOR_SEQ_INST_HALT |
+               SOR_SEQ_INST_PIN_A_LOW |
+               SOR_SEQ_INST_PIN_B_LOW |
+               SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
+
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
+
+       value = 0x1c800;
+       value &= ~SOR_CSTM_ROTCLK(~0);
+       value |= SOR_CSTM_ROTCLK(2);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
+
+       tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+       /* start SOR */
+       tegra_hdmi_writel(hdmi,
+                         SOR_PWR_NORMAL_STATE_PU |
+                         SOR_PWR_NORMAL_START_NORMAL |
+                         SOR_PWR_SAFE_STATE_PD |
+                         SOR_PWR_SETTING_NEW_TRIGGER,
+                         HDMI_NV_PDISP_SOR_PWR);
+       tegra_hdmi_writel(hdmi,
+                         SOR_PWR_NORMAL_STATE_PU |
+                         SOR_PWR_NORMAL_START_NORMAL |
+                         SOR_PWR_SAFE_STATE_PD |
+                         SOR_PWR_SETTING_NEW_DONE,
+                         HDMI_NV_PDISP_SOR_PWR);
+
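+       /* wait for hardware to latch the new power state */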
+       do {
+               BUG_ON(--retries < 0);
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
+       } while (value & SOR_PWR_SETTING_NEW_PENDING);
+
+       value = SOR_STATE_ASY_CRCMODE_COMPLETE |
+               SOR_STATE_ASY_OWNER_HEAD0 |
+               SOR_STATE_ASY_SUBOWNER_BOTH |
+               SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
+               SOR_STATE_ASY_DEPOL_POS;
+
+       /* setup sync polarities */
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               value |= SOR_STATE_ASY_HSYNCPOL_POS;
+
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               value |= SOR_STATE_ASY_HSYNCPOL_NEG;
+
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               value |= SOR_STATE_ASY_VSYNCPOL_POS;
+
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               value |= SOR_STATE_ASY_VSYNCPOL_NEG;
+
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
+
+       value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+       tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
+       tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
+                         HDMI_NV_PDISP_SOR_STATE1);
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
+
+       tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+       value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+               PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       value = DISP_CTRL_MODE_C_DISPLAY;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
+
+       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+       /* TODO: add HDCP support */
+
+       return 0;
+}
+
+static int tegra_output_hdmi_disable(struct tegra_output *output)
+{
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+
+       tegra_periph_reset_assert(hdmi->clk);
+       clk_disable(hdmi->clk);
+       regulator_disable(hdmi->pll);
+       regulator_disable(hdmi->vdd);
+
+       return 0;
+}
+
+static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
+                                        struct clk *clk, unsigned long pclk)
+{
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+       struct clk *base;
+       int err;
+
+       err = clk_set_parent(clk, hdmi->clk_parent);
+       if (err < 0) {
+               dev_err(output->dev, "failed to set parent: %d\n", err);
+               return err;
+       }
+
+       base = clk_get_parent(hdmi->clk_parent);
+
+       /*
+        * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+        * respectively, each of which divides the base pll_d by 2.
+        */
+       err = clk_set_rate(base, pclk * 2);
+       if (err < 0)
+               dev_err(output->dev,
+                       "failed to set base clock rate to %lu Hz\n",
+                       pclk * 2);
+
+       return 0;
+}
+
+static int tegra_output_hdmi_check_mode(struct tegra_output *output,
+                                       struct drm_display_mode *mode,
+                                       enum drm_mode_status *status)
+{
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+       unsigned long pclk = mode->clock * 1000;
+       struct clk *parent;
+       long err;
+
+       parent = clk_get_parent(hdmi->clk_parent);
+
+       err = clk_round_rate(parent, pclk * 4);
+       if (err < 0)
+               *status = MODE_NOCLOCK;
+       else
+               *status = MODE_OK;
+
+       return 0;
+}
+
+static const struct tegra_output_ops hdmi_ops = {
+       .enable = tegra_output_hdmi_enable,
+       .disable = tegra_output_hdmi_disable,
+       .setup_clock = tegra_output_hdmi_setup_clock,
+       .check_mode = tegra_output_hdmi_check_mode,
+};
+
+static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
+{
+       struct drm_info_node *node = s->private;
+       struct tegra_hdmi *hdmi = node->info_ent->data;
+
+#define DUMP_REG(name)                                         \
+       seq_printf(s, "%-56s %#05x %08lx\n", #name, name,       \
+               tegra_hdmi_readl(hdmi, name))
+
+       DUMP_REG(HDMI_CTXSW);
+       DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
+       DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
+       DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
+       DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
+       DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
+       DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
+       DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
+       DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
+       DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
+       DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
+       DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
+       DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
+       DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
+       DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
+       DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
+       DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
+       DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
+       DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
+       DUMP_REG(HDMI_NV_PDISP_SCRATCH);
+       DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
+       DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
+       DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
+       DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
+       DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
+       DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
+       DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
+       DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
+       DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
+       DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
+       DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+
+#undef DUMP_REG
+
+       return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+       { "regs", tegra_hdmi_show_regs, 0, NULL },
+};
+
+static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
+                                  struct drm_minor *minor)
+{
+       unsigned int i;
+       int err;
+
+       hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
+       if (!hdmi->debugfs)
+               return -ENOMEM;
+
+       hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+                                     GFP_KERNEL);
+       if (!hdmi->debugfs_files) {
+               err = -ENOMEM;
+               goto remove;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+               hdmi->debugfs_files[i].data = hdmi;
+
+       err = drm_debugfs_create_files(hdmi->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+                                      hdmi->debugfs, minor);
+       if (err < 0)
+               goto free;
+
+       hdmi->minor = minor;
+
+       return 0;
+
+free:
+       kfree(hdmi->debugfs_files);
+       hdmi->debugfs_files = NULL;
+remove:
+       debugfs_remove(hdmi->debugfs);
+       hdmi->debugfs = NULL;
+
+       return err;
+}
+
+static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
+{
+       drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
+                                hdmi->minor);
+       hdmi->minor = NULL;
+
+       kfree(hdmi->debugfs_files);
+       hdmi->debugfs_files = NULL;
+
+       debugfs_remove(hdmi->debugfs);
+       hdmi->debugfs = NULL;
+
+       return 0;
+}
+
+static int tegra_hdmi_drm_init(struct host1x_client *client,
+                              struct drm_device *drm)
+{
+       struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+       int err;
+
+       hdmi->output.type = TEGRA_OUTPUT_HDMI;
+       hdmi->output.dev = client->dev;
+       hdmi->output.ops = &hdmi_ops;
+
+       err = tegra_output_init(drm, &hdmi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output setup failed: %d\n", err);
+               return err;
+       }
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+               if (err < 0)
+                       dev_err(client->dev, "debugfs setup failed: %d\n", err);
+       }
+
+       return 0;
+}
+
+static int tegra_hdmi_drm_exit(struct host1x_client *client)
+{
+       struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
+       int err;
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_hdmi_debugfs_exit(hdmi);
+               if (err < 0)
+                       dev_err(client->dev, "debugfs cleanup failed: %d\n",
+                               err);
+       }
+
+       err = tegra_output_disable(&hdmi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output failed to disable: %d\n", err);
+               return err;
+       }
+
+       err = tegra_output_exit(&hdmi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output cleanup failed: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct host1x_client_ops hdmi_client_ops = {
+       .drm_init = tegra_hdmi_drm_init,
+       .drm_exit = tegra_hdmi_drm_exit,
+};
+
+static int tegra_hdmi_probe(struct platform_device *pdev)
+{
+       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct tegra_hdmi *hdmi;
+       struct resource *regs;
+       int err;
+
+       hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
+       if (!hdmi)
+               return -ENOMEM;
+
+       hdmi->dev = &pdev->dev;
+       hdmi->audio_source = AUTO;
+       hdmi->audio_freq = 44100;
+       hdmi->stereo = false;
+       hdmi->dvi = false;
+
+       hdmi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(hdmi->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               return PTR_ERR(hdmi->clk);
+       }
+
+       err = clk_prepare(hdmi->clk);
+       if (err < 0)
+               return err;
+
+       hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+       if (IS_ERR(hdmi->clk_parent))
+               return PTR_ERR(hdmi->clk_parent);
+
+       err = clk_prepare(hdmi->clk_parent);
+       if (err < 0)
+               return err;
+
+       err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
+               return err;
+       }
+
+       hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
+       if (IS_ERR(hdmi->vdd)) {
+               dev_err(&pdev->dev, "failed to get VDD regulator\n");
+               return PTR_ERR(hdmi->vdd);
+       }
+
+       hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
+       if (IS_ERR(hdmi->pll)) {
+               dev_err(&pdev->dev, "failed to get PLL regulator\n");
+               return PTR_ERR(hdmi->pll);
+       }
+
+       hdmi->output.dev = &pdev->dev;
+
+       err = tegra_output_parse_dt(&hdmi->output);
+       if (err < 0)
+               return err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs)
+               return -ENXIO;
+
+       hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
+       if (!hdmi->regs)
+               return -EADDRNOTAVAIL;
+
+       err = platform_get_irq(pdev, 0);
+       if (err < 0)
+               return err;
+
+       hdmi->irq = err;
+
+       hdmi->client.ops = &hdmi_client_ops;
+       INIT_LIST_HEAD(&hdmi->client.list);
+       hdmi->client.dev = &pdev->dev;
+
+       err = host1x_register_client(host1x, &hdmi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       platform_set_drvdata(pdev, hdmi);
+
+       return 0;
+}
+
+static int tegra_hdmi_remove(struct platform_device *pdev)
+{
+       struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
+       struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_unregister_client(host1x, &hdmi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       clk_unprepare(hdmi->clk_parent);
+       clk_unprepare(hdmi->clk);
+
+       return 0;
+}
+
+static struct of_device_id tegra_hdmi_of_match[] = {
+       { .compatible = "nvidia,tegra30-hdmi", },
+       { .compatible = "nvidia,tegra20-hdmi", },
+       { },
+};
+
+struct platform_driver tegra_hdmi_driver = {
+       .driver = {
+               .name = "tegra-hdmi",
+               .owner = THIS_MODULE,
+               .of_match_table = tegra_hdmi_of_match,
+       },
+       .probe = tegra_hdmi_probe,
+       .remove = tegra_hdmi_remove,
+};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644 (file)
index 0000000..1477f36
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_HDMI_H
+#define TEGRA_HDMI_H 1
+
+#define HDMI_INFOFRAME_TYPE_VENDOR   0x81
+#define HDMI_INFOFRAME_TYPE_AVI      0x82
+#define HDMI_INFOFRAME_TYPE_SPD      0x83
+#define HDMI_INFOFRAME_TYPE_AUDIO    0x84
+#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
+#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
+
+/* all fields little endian */
+struct hdmi_avi_infoframe {
+       /* PB0 */
+       u8 csum;
+
+       /* PB1 */
+       unsigned s:2; /* scan information */
+       unsigned b:2; /* bar info data valid */
+       unsigned a:1; /* active info present */
+       unsigned y:2; /* RGB or YCbCr */
+       unsigned res1:1;
+
+       /* PB2 */
+       unsigned r:4; /* active format aspect ratio */
+       unsigned m:2; /* picture aspect ratio */
+       unsigned c:2; /* colorimetry */
+
+       /* PB3 */
+       unsigned sc:2;  /* scan information */
+       unsigned q:2;   /* quantization range */
+       unsigned ec:3;  /* extended colorimetry */
+       unsigned itc:1; /* IT content */
+
+       /* PB4 */
+       unsigned vic:7; /* video format id code */
+       unsigned res4:1;
+
+       /* PB5 */
+       unsigned pr:4; /* pixel repetition factor */
+       unsigned cn:2; /* IT content type */
+       unsigned yq:2; /* ycc quantization range */
+
+       /* PB6-7 */
+       u16 top_bar_end_line;
+
+       /* PB8-9 */
+       u16 bot_bar_start_line;
+
+       /* PB10-11 */
+       u16 left_bar_end_pixel;
+
+       /* PB12-13 */
+       u16 right_bar_start_pixel;
+} __packed;
+
+#define HDMI_AVI_VERSION 0x02
+
+#define HDMI_AVI_Y_RGB       0x0
+#define HDMI_AVI_Y_YCBCR_422 0x1
+#define HDMI_AVI_Y_YCBCR_444 0x2
+
+#define HDMI_AVI_B_VERT  0x1
+#define HDMI_AVI_B_HORIZ 0x2
+
+#define HDMI_AVI_S_NONE      0x0
+#define HDMI_AVI_S_OVERSCAN  0x1
+#define HDMI_AVI_S_UNDERSCAN 0x2
+
+#define HDMI_AVI_C_NONE     0x0
+#define HDMI_AVI_C_SMPTE    0x1
+#define HDMI_AVI_C_ITU_R    0x2
+#define HDMI_AVI_C_EXTENDED 0x4
+
+#define HDMI_AVI_M_4_3  0x1
+#define HDMI_AVI_M_16_9 0x2
+
+#define HDMI_AVI_R_SAME        0x8
+#define HDMI_AVI_R_4_3_CENTER  0x9
+#define HDMI_AVI_R_16_9_CENTER 0xa
+#define HDMI_AVI_R_14_9_CENTER 0xb
+
+/* all fields little endian */
+struct hdmi_audio_infoframe {
+       /* PB0 */
+       u8 csum;
+
+       /* PB1 */
+       unsigned cc:3; /* channel count */
+       unsigned res1:1;
+       unsigned ct:4; /* coding type */
+
+       /* PB2 */
+       unsigned ss:2; /* sample size */
+       unsigned sf:3; /* sample frequency */
+       unsigned res2:3;
+
+       /* PB3 */
+       unsigned cxt:5; /* coding extension type */
+       unsigned res3:3;
+
+       /* PB4 */
+       u8 ca; /* channel/speaker allocation */
+
+       /* PB5 */
+       unsigned res5:3;
+       unsigned lsv:4; /* level shift value */
+       unsigned dm_inh:1; /* downmix inhibit */
+
+       /* PB6-10 reserved */
+       u8 res6;
+       u8 res7;
+       u8 res8;
+       u8 res9;
+       u8 res10;
+} __packed;
+
+#define HDMI_AUDIO_VERSION 0x01
+
+#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CC_2      0x1
+#define HDMI_AUDIO_CC_3      0x2
+#define HDMI_AUDIO_CC_4      0x3
+#define HDMI_AUDIO_CC_5      0x4
+#define HDMI_AUDIO_CC_6      0x5
+#define HDMI_AUDIO_CC_7      0x6
+#define HDMI_AUDIO_CC_8      0x7
+
+#define HDMI_AUDIO_CT_STREAM  0x0 /* specified by audio stream */
+#define HDMI_AUDIO_CT_PCM     0x1
+#define HDMI_AUDIO_CT_AC3     0x2
+#define HDMI_AUDIO_CT_MPEG1   0x3
+#define HDMI_AUDIO_CT_MP3     0x4
+#define HDMI_AUDIO_CT_MPEG2   0x5
+#define HDMI_AUDIO_CT_AAC_LC  0x6
+#define HDMI_AUDIO_CT_DTS     0x7
+#define HDMI_AUDIO_CT_ATRAC   0x8
+#define HDMI_AUDIO_CT_DSD     0x9
+#define HDMI_AUDIO_CT_E_AC3   0xa
+#define HDMI_AUDIO_CT_DTS_HD  0xb
+#define HDMI_AUDIO_CT_MLP     0xc
+#define HDMI_AUDIO_CT_DST     0xd
+#define HDMI_AUDIO_CT_WMA_PRO 0xe
+#define HDMI_AUDIO_CT_CXT     0xf
+
+#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SF_32K    0x1
+#define HDMI_AUDIO_SF_44_1K  0x2
+#define HDMI_AUDIO_SF_48K    0x3
+#define HDMI_AUDIO_SF_88_2K  0x4
+#define HDMI_AUDIO_SF_96K    0x5
+#define HDMI_AUDIO_SF_176_4K 0x6
+#define HDMI_AUDIO_SF_192K   0x7
+
+#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
+#define HDMI_AUDIO_SS_16BIT  0x1
+#define HDMI_AUDIO_SS_20BIT  0x2
+#define HDMI_AUDIO_SS_24BIT  0x3
+
+#define HDMI_AUDIO_CXT_CT            0x0 /* refer to coding in CT */
+#define HDMI_AUDIO_CXT_HE_AAC        0x1
+#define HDMI_AUDIO_CXT_HE_AAC_V2     0x2
+#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
+
+/* all fields little endian */
+struct hdmi_stereo_infoframe {
+       /* PB0 */
+       u8 csum;
+
+       /* PB1 */
+       u8 regid0;
+
+       /* PB2 */
+       u8 regid1;
+
+       /* PB3 */
+       u8 regid2;
+
+       /* PB4 */
+       unsigned res1:5;
+       unsigned hdmi_video_format:3;
+
+       /* PB5 */
+       unsigned res2:4;
+       unsigned _3d_structure:4;
+
+       /* PB6*/
+       unsigned res3:4;
+       unsigned _3d_ext_data:4;
+} __packed;
+
+#define HDMI_VENDOR_VERSION 0x01
+
+/* register definitions */
+#define HDMI_CTXSW                                             0x00
+
+#define HDMI_NV_PDISP_SOR_STATE0                               0x01
+#define SOR_STATE_UPDATE (1 << 0)
+
+#define HDMI_NV_PDISP_SOR_STATE1                               0x02
+#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
+#define SOR_STATE_ASY_ORMODE_NORMAL     (1 << 2)
+#define SOR_STATE_ATTACHED              (1 << 3)
+
+#define HDMI_NV_PDISP_SOR_STATE2                               0x03
+#define SOR_STATE_ASY_OWNER_NONE         (0 <<  0)
+#define SOR_STATE_ASY_OWNER_HEAD0        (1 <<  0)
+#define SOR_STATE_ASY_SUBOWNER_NONE      (0 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0  (1 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1  (2 <<  4)
+#define SOR_STATE_ASY_SUBOWNER_BOTH      (3 <<  4)
+#define SOR_STATE_ASY_CRCMODE_ACTIVE     (0 <<  6)
+#define SOR_STATE_ASY_CRCMODE_COMPLETE   (1 <<  6)
+#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 <<  6)
+#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
+#define SOR_STATE_ASY_PROTOCOL_CUSTOM        (15 << 8)
+#define SOR_STATE_ASY_HSYNCPOL_POS       (0 << 12)
+#define SOR_STATE_ASY_HSYNCPOL_NEG       (1 << 12)
+#define SOR_STATE_ASY_VSYNCPOL_POS       (0 << 13)
+#define SOR_STATE_ASY_VSYNCPOL_NEG       (1 << 13)
+#define SOR_STATE_ASY_DEPOL_POS          (0 << 14)
+#define SOR_STATE_ASY_DEPOL_NEG          (1 << 14)
+
+#define HDMI_NV_PDISP_RG_HDCP_AN_MSB                           0x04
+#define HDMI_NV_PDISP_RG_HDCP_AN_LSB                           0x05
+#define HDMI_NV_PDISP_RG_HDCP_CN_MSB                           0x06
+#define HDMI_NV_PDISP_RG_HDCP_CN_LSB                           0x07
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB                         0x08
+#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB                         0x09
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB                         0x0a
+#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB                         0x0b
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB                         0x0c
+#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB                         0x0d
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB                         0x0e
+#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB                         0x0f
+#define HDMI_NV_PDISP_RG_HDCP_CTRL                             0x10
+#define HDMI_NV_PDISP_RG_HDCP_CMODE                            0x11
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB                       0x12
+#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB                       0x13
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB                       0x14
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2                      0x15
+#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1                      0x16
+#define HDMI_NV_PDISP_RG_HDCP_RI                               0x17
+#define HDMI_NV_PDISP_RG_HDCP_CS_MSB                           0x18
+#define HDMI_NV_PDISP_RG_HDCP_CS_LSB                           0x19
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0                          0x1a
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0                    0x1b
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1                          0x1c
+#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2                          0x1d
+
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL                        0x1e
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS              0x1f
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER              0x20
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW                0x21
+#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH       0x22
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL                  0x23
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS                        0x24
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER                        0x25
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW          0x26
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH         0x27
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW          0x28
+#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH         0x29
+
+#define INFOFRAME_CTRL_ENABLE (1 << 0)
+
+#define INFOFRAME_HEADER_TYPE(x)    (((x) & 0xff) <<  0)
+#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) <<  8)
+#define INFOFRAME_HEADER_LEN(x)     (((x) & 0x0f) << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL                                0x2a
+#define GENERIC_CTRL_ENABLE (1 <<  0)
+#define GENERIC_CTRL_OTHER  (1 <<  4)
+#define GENERIC_CTRL_SINGLE (1 <<  8)
+#define GENERIC_CTRL_HBLANK (1 << 12)
+#define GENERIC_CTRL_AUDIO  (1 << 16)
+
+#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS                      0x2b
+#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER                      0x2c
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW                        0x2d
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH               0x2e
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW                        0x2f
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH               0x30
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW                        0x31
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH               0x32
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW                        0x33
+#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH               0x34
+
+#define HDMI_NV_PDISP_HDMI_ACR_CTRL                            0x35
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW                        0x36
+#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH               0x37
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW                        0x38
+#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH               0x39
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW                        0x3a
+#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH               0x3b
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW                        0x3c
+#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH               0x3d
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW                        0x3e
+#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH               0x3f
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW                        0x40
+#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH               0x41
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW                        0x42
+#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH               0x43
+
+#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
+#define ACR_SUBPACK_N(x)   (((x) & 0xffffff) << 0)
+#define ACR_ENABLE         (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_CTRL                                        0x44
+#define HDMI_CTRL_REKEY(x)         (((x) & 0x7f) <<  0)
+#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
+#define HDMI_CTRL_ENABLE           (1 << 30)
+
+#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT                       0x45
+#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW                                0x46
+#define VSYNC_WINDOW_END(x)   (((x) & 0x3ff) <<  0)
+#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
+#define VSYNC_WINDOW_ENABLE   (1 << 31)
+
+#define HDMI_NV_PDISP_HDMI_GCP_CTRL                            0x47
+#define HDMI_NV_PDISP_HDMI_GCP_STATUS                          0x48
+#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK                         0x49
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1                     0x4a
+#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2                     0x4b
+#define HDMI_NV_PDISP_HDMI_EMU0                                        0x4c
+#define HDMI_NV_PDISP_HDMI_EMU1                                        0x4d
+#define HDMI_NV_PDISP_HDMI_EMU1_RDATA                          0x4e
+
+#define HDMI_NV_PDISP_HDMI_SPARE                               0x4f
+#define SPARE_HW_CTS           (1 << 0)
+#define SPARE_FORCE_SW_CTS     (1 << 1)
+#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
+
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1                   0x50
+#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2                   0x51
+#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL                    0x53
+#define HDMI_NV_PDISP_SOR_CAP                                  0x54
+#define HDMI_NV_PDISP_SOR_PWR                                  0x55
+#define SOR_PWR_NORMAL_STATE_PD     (0 <<  0)
+#define SOR_PWR_NORMAL_STATE_PU     (1 <<  0)
+#define SOR_PWR_NORMAL_START_NORMAL (0 <<  1)
+#define SOR_PWR_NORMAL_START_ALT    (1 <<  1)
+#define SOR_PWR_SAFE_STATE_PD       (0 << 16)
+#define SOR_PWR_SAFE_STATE_PU       (1 << 16)
+#define SOR_PWR_SETTING_NEW_DONE    (0 << 31)
+#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
+#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
+
+#define HDMI_NV_PDISP_SOR_TEST                                 0x56
+#define HDMI_NV_PDISP_SOR_PLL0                                 0x57
+#define SOR_PLL_PWR            (1 << 0)
+#define SOR_PLL_PDBG           (1 << 1)
+#define SOR_PLL_VCAPD          (1 << 2)
+#define SOR_PLL_PDPORT         (1 << 3)
+#define SOR_PLL_RESISTORSEL    (1 << 4)
+#define SOR_PLL_PULLDOWN       (1 << 5)
+#define SOR_PLL_VCOCAP(x)      (((x) & 0xf) <<  8)
+#define SOR_PLL_BG_V17_S(x)    (((x) & 0xf) << 12)
+#define SOR_PLL_FILTER(x)      (((x) & 0xf) << 16)
+#define SOR_PLL_ICHPMP(x)      (((x) & 0xf) << 24)
+#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
+
+#define HDMI_NV_PDISP_SOR_PLL1                                 0x58
+#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
+#define SOR_PLL_TMDS_TERMADJ(x)  (((x) & 0xf) <<  9)
+#define SOR_PLL_LOADADJ(x)       (((x) & 0xf) << 20)
+#define SOR_PLL_PE_EN            (1 << 28)
+#define SOR_PLL_HALF_FULL_PE     (1 << 29)
+#define SOR_PLL_S_D_PIN_PE       (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_PLL2                                 0x59
+
+#define HDMI_NV_PDISP_SOR_CSTM                                 0x5a
+#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
+
+#define HDMI_NV_PDISP_SOR_LVDS                                 0x5b
+#define HDMI_NV_PDISP_SOR_CRCA                                 0x5c
+#define HDMI_NV_PDISP_SOR_CRCB                                 0x5d
+#define HDMI_NV_PDISP_SOR_BLANK                                        0x5e
+#define HDMI_NV_PDISP_SOR_SEQ_CTL                              0x5f
+#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) <<  0)
+#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) <<  4)
+#define SOR_SEQ_PD_PC(x)     (((x) & 0xf) <<  8)
+#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
+#define SOR_SEQ_PC(x)        (((x) & 0xf) << 16)
+#define SOR_SEQ_STATUS       (1 << 28)
+#define SOR_SEQ_SWITCH       (1 << 30)
+
+#define HDMI_NV_PDISP_SOR_SEQ_INST(x)                          (0x60 + (x))
+
+#define SOR_SEQ_INST_WAIT_TIME(x)     (((x) & 0x3ff) << 0)
+#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
+#define SOR_SEQ_INST_HALT             (1 << 15)
+#define SOR_SEQ_INST_PIN_A_LOW        (0 << 21)
+#define SOR_SEQ_INST_PIN_A_HIGH       (1 << 21)
+#define SOR_SEQ_INST_PIN_B_LOW        (0 << 22)
+#define SOR_SEQ_INST_PIN_B_HIGH       (1 << 22)
+#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
+
+#define HDMI_NV_PDISP_SOR_VCRCA0                               0x72
+#define HDMI_NV_PDISP_SOR_VCRCA1                               0x73
+#define HDMI_NV_PDISP_SOR_CCRCA0                               0x74
+#define HDMI_NV_PDISP_SOR_CCRCA1                               0x75
+#define HDMI_NV_PDISP_SOR_EDATAA0                              0x76
+#define HDMI_NV_PDISP_SOR_EDATAA1                              0x77
+#define HDMI_NV_PDISP_SOR_COUNTA0                              0x78
+#define HDMI_NV_PDISP_SOR_COUNTA1                              0x79
+#define HDMI_NV_PDISP_SOR_DEBUGA0                              0x7a
+#define HDMI_NV_PDISP_SOR_DEBUGA1                              0x7b
+#define HDMI_NV_PDISP_SOR_TRIG                                 0x7c
+#define HDMI_NV_PDISP_SOR_MSCHECK                              0x7d
+
+#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT                   0x7e
+#define DRIVE_CURRENT_LANE0(x)      (((x) & 0x3f) <<  0)
+#define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
+#define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
+#define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
+#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+
+#define DRIVE_CURRENT_1_500_mA  0x00
+#define DRIVE_CURRENT_1_875_mA  0x01
+#define DRIVE_CURRENT_2_250_mA  0x02
+#define DRIVE_CURRENT_2_625_mA  0x03
+#define DRIVE_CURRENT_3_000_mA  0x04
+#define DRIVE_CURRENT_3_375_mA  0x05
+#define DRIVE_CURRENT_3_750_mA  0x06
+#define DRIVE_CURRENT_4_125_mA  0x07
+#define DRIVE_CURRENT_4_500_mA  0x08
+#define DRIVE_CURRENT_4_875_mA  0x09
+#define DRIVE_CURRENT_5_250_mA  0x0a
+#define DRIVE_CURRENT_5_625_mA  0x0b
+#define DRIVE_CURRENT_6_000_mA  0x0c
+#define DRIVE_CURRENT_6_375_mA  0x0d
+#define DRIVE_CURRENT_6_750_mA  0x0e
+#define DRIVE_CURRENT_7_125_mA  0x0f
+#define DRIVE_CURRENT_7_500_mA  0x10
+#define DRIVE_CURRENT_7_875_mA  0x11
+#define DRIVE_CURRENT_8_250_mA  0x12
+#define DRIVE_CURRENT_8_625_mA  0x13
+#define DRIVE_CURRENT_9_000_mA  0x14
+#define DRIVE_CURRENT_9_375_mA  0x15
+#define DRIVE_CURRENT_9_750_mA  0x16
+#define DRIVE_CURRENT_10_125_mA 0x17
+#define DRIVE_CURRENT_10_500_mA 0x18
+#define DRIVE_CURRENT_10_875_mA 0x19
+#define DRIVE_CURRENT_11_250_mA 0x1a
+#define DRIVE_CURRENT_11_625_mA 0x1b
+#define DRIVE_CURRENT_12_000_mA 0x1c
+#define DRIVE_CURRENT_12_375_mA 0x1d
+#define DRIVE_CURRENT_12_750_mA 0x1e
+#define DRIVE_CURRENT_13_125_mA 0x1f
+#define DRIVE_CURRENT_13_500_mA 0x20
+#define DRIVE_CURRENT_13_875_mA 0x21
+#define DRIVE_CURRENT_14_250_mA 0x22
+#define DRIVE_CURRENT_14_625_mA 0x23
+#define DRIVE_CURRENT_15_000_mA 0x24
+#define DRIVE_CURRENT_15_375_mA 0x25
+#define DRIVE_CURRENT_15_750_mA 0x26
+#define DRIVE_CURRENT_16_125_mA 0x27
+#define DRIVE_CURRENT_16_500_mA 0x28
+#define DRIVE_CURRENT_16_875_mA 0x29
+#define DRIVE_CURRENT_17_250_mA 0x2a
+#define DRIVE_CURRENT_17_625_mA 0x2b
+#define DRIVE_CURRENT_18_000_mA 0x2c
+#define DRIVE_CURRENT_18_375_mA 0x2d
+#define DRIVE_CURRENT_18_750_mA 0x2e
+#define DRIVE_CURRENT_19_125_mA 0x2f
+#define DRIVE_CURRENT_19_500_mA 0x30
+#define DRIVE_CURRENT_19_875_mA 0x31
+#define DRIVE_CURRENT_20_250_mA 0x32
+#define DRIVE_CURRENT_20_625_mA 0x33
+#define DRIVE_CURRENT_21_000_mA 0x34
+#define DRIVE_CURRENT_21_375_mA 0x35
+#define DRIVE_CURRENT_21_750_mA 0x36
+#define DRIVE_CURRENT_22_125_mA 0x37
+#define DRIVE_CURRENT_22_500_mA 0x38
+#define DRIVE_CURRENT_22_875_mA 0x39
+#define DRIVE_CURRENT_23_250_mA 0x3a
+#define DRIVE_CURRENT_23_625_mA 0x3b
+#define DRIVE_CURRENT_24_000_mA 0x3c
+#define DRIVE_CURRENT_24_375_mA 0x3d
+#define DRIVE_CURRENT_24_750_mA 0x3e
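+
+/*
+ * The drive current codes above step in 0.375 mA increments starting at
+ * 1.5 mA, i.e. code = (current - 1.5 mA) / 0.375 mA. A (hypothetical)
+ * helper could derive the code from a value in microamps instead of
+ * using the named constants:
+ *
+ *   static inline unsigned long drive_current_code(unsigned long ua)
+ *   {
+ *           return (ua - 1500) / 375;
+ *   }
+ */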
+
+#define HDMI_NV_PDISP_AUDIO_DEBUG0                             0x7f
+#define HDMI_NV_PDISP_AUDIO_DEBUG1                             0x80
+#define HDMI_NV_PDISP_AUDIO_DEBUG2                             0x81
+
+#define HDMI_NV_PDISP_AUDIO_FS(x)                              (0x82 + (x))
+#define AUDIO_FS_LOW(x)  (((x) & 0xfff) <<  0)
+#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
+
+#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH                                0x89
+#define HDMI_NV_PDISP_AUDIO_THRESHOLD                          0x8a
+#define HDMI_NV_PDISP_AUDIO_CNTRL0                             0x8b
+#define AUDIO_CNTRL0_ERROR_TOLERANCE(x)  (((x) & 0xff) << 0)
+#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO  (0 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
+#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL  (2 << 20)
+#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
+
+#define HDMI_NV_PDISP_AUDIO_N                                  0x8c
+#define AUDIO_N_VALUE(x)           (((x) & 0xfffff) << 0)
+#define AUDIO_N_RESETF             (1 << 20)
+#define AUDIO_N_GENERATE_NORMAL    (0 << 24)
+#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
+
+#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING                       0x94
+#define HDMI_NV_PDISP_SOR_REFCLK                               0x95
+#define SOR_REFCLK_DIV_INT(x)  (((x) & 0xff) << 8)
+#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
+
+#define HDMI_NV_PDISP_CRC_CONTROL                              0x96
+#define HDMI_NV_PDISP_INPUT_CONTROL                            0x97
+#define HDMI_SRC_DISPLAYA       (0 << 0)
+#define HDMI_SRC_DISPLAYB       (1 << 0)
+#define ARM_VIDEO_RANGE_FULL    (0 << 1)
+#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
+
+#define HDMI_NV_PDISP_SCRATCH                                  0x98
+#define HDMI_NV_PDISP_PE_CURRENT                               0x99
+#define PE_CURRENT0(x) (((x) & 0xf) << 0)
+#define PE_CURRENT1(x) (((x) & 0xf) << 8)
+#define PE_CURRENT2(x) (((x) & 0xf) << 16)
+#define PE_CURRENT3(x) (((x) & 0xf) << 24)
+
+#define PE_CURRENT_0_0_mA 0x0
+#define PE_CURRENT_0_5_mA 0x1
+#define PE_CURRENT_1_0_mA 0x2
+#define PE_CURRENT_1_5_mA 0x3
+#define PE_CURRENT_2_0_mA 0x4
+#define PE_CURRENT_2_5_mA 0x5
+#define PE_CURRENT_3_0_mA 0x6
+#define PE_CURRENT_3_5_mA 0x7
+#define PE_CURRENT_4_0_mA 0x8
+#define PE_CURRENT_4_5_mA 0x9
+#define PE_CURRENT_5_0_mA 0xa
+#define PE_CURRENT_5_5_mA 0xb
+#define PE_CURRENT_6_0_mA 0xc
+#define PE_CURRENT_6_5_mA 0xd
+#define PE_CURRENT_7_0_mA 0xe
+#define PE_CURRENT_7_5_mA 0xf
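+
+/* The pre-emphasis current codes above step in 0.5 mA increments from
+ * 0 mA, so code = current in microamps / 500 (e.g. 7.5 mA -> 0xf). */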
+
+#define HDMI_NV_PDISP_KEY_CTRL                                 0x9a
+#define HDMI_NV_PDISP_KEY_DEBUG0                               0x9b
+#define HDMI_NV_PDISP_KEY_DEBUG1                               0x9c
+#define HDMI_NV_PDISP_KEY_DEBUG2                               0x9d
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_0                           0x9e
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_1                           0x9f
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_2                           0xa0
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_3                           0xa1
+#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG                                0xa2
+#define HDMI_NV_PDISP_KEY_SKEY_INDEX                           0xa3
+
+#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0                         0xac
+#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR                  0xbc
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE                   0xbd
+
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320    0xbf
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441    0xc0
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882    0xc1
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764    0xc2
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480    0xc3
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960    0xc4
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
+#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
+
+#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 0000000..bdb97a5
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+
+struct host1x_drm_client {
+       struct host1x_client *client;
+       struct device_node *np;
+       struct list_head list;
+};
+
+static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
+{
+       struct host1x_drm_client *client;
+
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
+       if (!client)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&client->list);
+       client->np = of_node_get(np);
+
+       list_add_tail(&client->list, &host1x->drm_clients);
+
+       return 0;
+}
+
+static int host1x_activate_drm_client(struct host1x *host1x,
+                                     struct host1x_drm_client *drm,
+                                     struct host1x_client *client)
+{
+       mutex_lock(&host1x->drm_clients_lock);
+       list_del_init(&drm->list);
+       list_add_tail(&drm->list, &host1x->drm_active);
+       drm->client = client;
+       mutex_unlock(&host1x->drm_clients_lock);
+
+       return 0;
+}
+
+static int host1x_remove_drm_client(struct host1x *host1x,
+                                   struct host1x_drm_client *client)
+{
+       mutex_lock(&host1x->drm_clients_lock);
+       list_del_init(&client->list);
+       mutex_unlock(&host1x->drm_clients_lock);
+
+       of_node_put(client->np);
+       kfree(client);
+
+       return 0;
+}
+
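+/*
+ * Scan the host1x node's children for the display controllers and HDMI
+ * encoders this driver can drive, and queue every enabled one as a
+ * client that must register before the DRM device can be brought up.
+ */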
+static int host1x_parse_dt(struct host1x *host1x)
+{
+       static const char * const compat[] = {
+               "nvidia,tegra20-dc",
+               "nvidia,tegra20-hdmi",
+               "nvidia,tegra30-dc",
+               "nvidia,tegra30-hdmi",
+       };
+       unsigned int i;
+       int err;
+
+       for (i = 0; i < ARRAY_SIZE(compat); i++) {
+               struct device_node *np;
+
+               for_each_child_of_node(host1x->dev->of_node, np) {
+                       if (of_device_is_compatible(np, compat[i]) &&
+                           of_device_is_available(np)) {
+                               err = host1x_add_drm_client(host1x, np);
+                               if (err < 0)
+                                       return err;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int tegra_host1x_probe(struct platform_device *pdev)
+{
+       struct host1x *host1x;
+       struct resource *regs;
+       int err;
+
+       host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
+       if (!host1x)
+               return -ENOMEM;
+
+       mutex_init(&host1x->drm_clients_lock);
+       INIT_LIST_HEAD(&host1x->drm_clients);
+       INIT_LIST_HEAD(&host1x->drm_active);
+       mutex_init(&host1x->clients_lock);
+       INIT_LIST_HEAD(&host1x->clients);
+       host1x->dev = &pdev->dev;
+
+       err = host1x_parse_dt(host1x);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
+               return err;
+       }
+
+       host1x->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(host1x->clk))
+               return PTR_ERR(host1x->clk);
+
+       err = clk_prepare_enable(host1x->clk);
+       if (err < 0)
+               return err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!regs) {
+               err = -ENXIO;
+               goto err;
+       }
+
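+       /* the first interrupt is used to signal syncpoints, the second
+        * one is the general host1x interrupt */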
+       err = platform_get_irq(pdev, 0);
+       if (err < 0)
+               goto err;
+
+       host1x->syncpt = err;
+
+       err = platform_get_irq(pdev, 1);
+       if (err < 0)
+               goto err;
+
+       host1x->irq = err;
+
+       host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
+       if (!host1x->regs) {
+               err = -EADDRNOTAVAIL;
+               goto err;
+       }
+
+       platform_set_drvdata(pdev, host1x);
+
+       return 0;
+
+err:
+       clk_disable_unprepare(host1x->clk);
+       return err;
+}
+
+static int tegra_host1x_remove(struct platform_device *pdev)
+{
+       struct host1x *host1x = platform_get_drvdata(pdev);
+
+       clk_disable_unprepare(host1x->clk);
+
+       return 0;
+}
+
+int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
+{
+       struct host1x_client *client;
+
+       mutex_lock(&host1x->clients_lock);
+
+       list_for_each_entry(client, &host1x->clients, list) {
+               if (client->ops && client->ops->drm_init) {
+                       int err = client->ops->drm_init(client, drm);
+                       if (err < 0) {
+                               dev_err(host1x->dev,
+                                       "DRM setup failed for %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&host1x->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->clients_lock);
+
+       return 0;
+}
+
+int host1x_drm_exit(struct host1x *host1x)
+{
+       struct platform_device *pdev = to_platform_device(host1x->dev);
+       struct host1x_client *client;
+
+       if (!host1x->drm)
+               return 0;
+
+       mutex_lock(&host1x->clients_lock);
+
+       list_for_each_entry_reverse(client, &host1x->clients, list) {
+               if (client->ops && client->ops->drm_exit) {
+                       int err = client->ops->drm_exit(client);
+                       if (err < 0) {
+                               dev_err(host1x->dev,
+                                       "DRM cleanup failed for %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&host1x->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->clients_lock);
+
+       drm_platform_exit(&tegra_drm_driver, pdev);
+       host1x->drm = NULL;
+
+       return 0;
+}
+
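+/*
+ * Clients (display controllers, HDMI encoders) call this from their
+ * probe routines. A minimal sketch of a caller, assuming a driver that
+ * embeds a struct host1x_client and provides a (hypothetical)
+ * my_client_ops:
+ *
+ *   client->ops = &my_client_ops;
+ *   client->dev = &pdev->dev;
+ *
+ *   err = host1x_register_client(host1x, client);
+ *   if (err < 0)
+ *           return err;
+ */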
+int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
+{
+       struct host1x_drm_client *drm, *tmp;
+       int err;
+
+       mutex_lock(&host1x->clients_lock);
+       list_add_tail(&client->list, &host1x->clients);
+       mutex_unlock(&host1x->clients_lock);
+
+       list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
+               if (drm->np == client->dev->of_node)
+                       host1x_activate_drm_client(host1x, drm, client);
+
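+       /*
+        * Each client declared in the device tree is moved from drm_clients
+        * to drm_active as it registers, so once the list drains all
+        * required devices have probed and the DRM device can be set up.
+        */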
+       if (list_empty(&host1x->drm_clients)) {
+               struct platform_device *pdev = to_platform_device(host1x->dev);
+
+               err = drm_platform_init(&tegra_drm_driver, pdev);
+               if (err < 0) {
+                       dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+int host1x_unregister_client(struct host1x *host1x,
+                            struct host1x_client *client)
+{
+       struct host1x_drm_client *drm, *tmp;
+       int err;
+
+       list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
+               if (drm->client == client) {
+                       err = host1x_drm_exit(host1x);
+                       if (err < 0) {
+                               dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
+                                       err);
+                               return err;
+                       }
+
+                       host1x_remove_drm_client(host1x, drm);
+                       break;
+               }
+       }
+
+       mutex_lock(&host1x->clients_lock);
+       list_del_init(&client->list);
+       mutex_unlock(&host1x->clients_lock);
+
+       return 0;
+}
+
+static const struct of_device_id tegra_host1x_of_match[] = {
+       { .compatible = "nvidia,tegra30-host1x", },
+       { .compatible = "nvidia,tegra20-host1x", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
+
+struct platform_driver tegra_host1x_driver = {
+       .driver = {
+               .name = "tegra-host1x",
+               .owner = THIS_MODULE,
+               .of_match_table = tegra_host1x_of_match,
+       },
+       .probe = tegra_host1x_probe,
+       .remove = tegra_host1x_remove,
+};
+
+static int __init tegra_host1x_init(void)
+{
+       int err;
+
+       err = platform_driver_register(&tegra_host1x_driver);
+       if (err < 0)
+               return err;
+
+       err = platform_driver_register(&tegra_dc_driver);
+       if (err < 0)
+               goto unregister_host1x;
+
+       err = platform_driver_register(&tegra_hdmi_driver);
+       if (err < 0)
+               goto unregister_dc;
+
+       return 0;
+
+unregister_dc:
+       platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+       platform_driver_unregister(&tegra_host1x_driver);
+       return err;
+}
+module_init(tegra_host1x_init);
+
+static void __exit tegra_host1x_exit(void)
+{
+       platform_driver_unregister(&tegra_hdmi_driver);
+       platform_driver_unregister(&tegra_dc_driver);
+       platform_driver_unregister(&tegra_host1x_driver);
+}
+module_exit(tegra_host1x_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 0000000..8140fc6
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_i2c.h>
+
+#include "drm.h"
+
+static int tegra_connector_get_modes(struct drm_connector *connector)
+{
+       struct tegra_output *output = connector_to_output(connector);
+       struct edid *edid = NULL;
+       int err = 0;
+
+       if (output->edid)
+               edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
+       else if (output->ddc)
+               edid = drm_get_edid(connector, output->ddc);
+
+       drm_mode_connector_update_edid_property(connector, edid);
+
+       if (edid) {
+               err = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       }
+
+       return err;
+}
+
+static int tegra_connector_mode_valid(struct drm_connector *connector,
+                                     struct drm_display_mode *mode)
+{
+       struct tegra_output *output = connector_to_output(connector);
+       enum drm_mode_status status = MODE_OK;
+       int err;
+
+       err = tegra_output_check_mode(output, mode, &status);
+       if (err < 0)
+               return MODE_ERROR;
+
+       return status;
+}
+
+static struct drm_encoder *
+tegra_connector_best_encoder(struct drm_connector *connector)
+{
+       struct tegra_output *output = connector_to_output(connector);
+
+       return &output->encoder;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+       .get_modes = tegra_connector_get_modes,
+       .mode_valid = tegra_connector_mode_valid,
+       .best_encoder = tegra_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tegra_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct tegra_output *output = connector_to_output(connector);
+       enum drm_connector_status status = connector_status_unknown;
+
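+       /* prefer the hotplug-detect GPIO; LVDS panels without one are
+        * assumed to be permanently connected */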
+       if (gpio_is_valid(output->hpd_gpio)) {
+               if (gpio_get_value(output->hpd_gpio) == 0)
+                       status = connector_status_disconnected;
+               else
+                       status = connector_status_connected;
+       } else {
+               if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+                       status = connector_status_connected;
+       }
+
+       return status;
+}
+
+static void tegra_connector_destroy(struct drm_connector *connector)
+{
+       drm_sysfs_connector_remove(connector);
+       drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .detect = tegra_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = tegra_connector_destroy,
+};
+
+static void tegra_encoder_destroy(struct drm_encoder *encoder)
+{
+       drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs encoder_funcs = {
+       .destroy = tegra_encoder_destroy,
+};
+
+static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
+                                    const struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted)
+{
+       return true;
+}
+
+static void tegra_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+static void tegra_encoder_mode_set(struct drm_encoder *encoder,
+                                  struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted)
+{
+       struct tegra_output *output = encoder_to_output(encoder);
+       int err;
+
+       err = tegra_output_enable(output);
+       if (err < 0)
+               dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+       .dpms = tegra_encoder_dpms,
+       .mode_fixup = tegra_encoder_mode_fixup,
+       .prepare = tegra_encoder_prepare,
+       .commit = tegra_encoder_commit,
+       .mode_set = tegra_encoder_mode_set,
+};
+
+static irqreturn_t hpd_irq(int irq, void *data)
+{
+       struct tegra_output *output = data;
+
+       drm_helper_hpd_irq_event(output->connector.dev);
+
+       return IRQ_HANDLED;
+}
+
+int tegra_output_parse_dt(struct tegra_output *output)
+{
+       enum of_gpio_flags flags;
+       struct device_node *ddc;
+       int size;
+       int err;
+
+       if (!output->of_node)
+               output->of_node = output->dev->of_node;
+
+       output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
+
+       ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
+       if (ddc) {
+               output->ddc = of_find_i2c_adapter_by_node(ddc);
+               if (!output->ddc) {
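+                       /* the I2C adapter may simply not have been
+                        * probed yet, so ask to be retried later */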
+                       err = -EPROBE_DEFER;
+                       of_node_put(ddc);
+                       return err;
+               }
+
+               of_node_put(ddc);
+       }
+
+       if (!output->edid && !output->ddc)
+               return -ENODEV;
+
+       output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
+                                                  "nvidia,hpd-gpio", 0,
+                                                  &flags);
+
+       return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+       int connector, encoder, err;
+
+       if (gpio_is_valid(output->hpd_gpio)) {
+               unsigned long flags;
+
+               err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
+                                      "HDMI hotplug detect");
+               if (err < 0) {
+                       dev_err(output->dev, "gpio_request_one(): %d\n", err);
+                       return err;
+               }
+
+               err = gpio_to_irq(output->hpd_gpio);
+               if (err < 0) {
+                       dev_err(output->dev, "gpio_to_irq(): %d\n", err);
+                       goto free_hpd;
+               }
+
+               output->hpd_irq = err;
+
+               flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+                       IRQF_ONESHOT;
+
+               err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
+                                          flags, "hpd", output);
+               if (err < 0) {
+                       dev_err(output->dev, "failed to request IRQ#%u: %d\n",
+                               output->hpd_irq, err);
+                       goto free_hpd;
+               }
+
+               output->connector.polled = DRM_CONNECTOR_POLL_HPD;
+       }
+
+       switch (output->type) {
+       case TEGRA_OUTPUT_RGB:
+               connector = DRM_MODE_CONNECTOR_LVDS;
+               encoder = DRM_MODE_ENCODER_LVDS;
+               break;
+
+       case TEGRA_OUTPUT_HDMI:
+               connector = DRM_MODE_CONNECTOR_HDMIA;
+               encoder = DRM_MODE_ENCODER_TMDS;
+               break;
+
+       default:
+               connector = DRM_MODE_CONNECTOR_Unknown;
+               encoder = DRM_MODE_ENCODER_NONE;
+               break;
+       }
+
+       drm_connector_init(drm, &output->connector, &connector_funcs,
+                          connector);
+       drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+
+       drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
+       drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
+
+       drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
+       drm_sysfs_connector_add(&output->connector);
+
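+       /* by default, an output can be attached to either display
+        * controller */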
+       output->encoder.possible_crtcs = 0x3;
+
+       return 0;
+
+free_hpd:
+       gpio_free(output->hpd_gpio);
+
+       return err;
+}
+
+int tegra_output_exit(struct tegra_output *output)
+{
+       if (gpio_is_valid(output->hpd_gpio)) {
+               free_irq(output->hpd_irq, output);
+               gpio_free(output->hpd_gpio);
+       }
+
+       if (output->ddc)
+               put_device(&output->ddc->dev);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 0000000..ed4416f
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+struct tegra_rgb {
+       struct tegra_output output;
+       struct clk *clk_parent;
+       struct clk *clk;
+};
+
+static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
+{
+       return container_of(output, struct tegra_rgb, output);
+}
+
+struct reg_entry {
+       unsigned long offset;
+       unsigned long value;
+};
+
+static const struct reg_entry rgb_enable[] = {
+       { DC_COM_PIN_OUTPUT_ENABLE(0),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_ENABLE(1),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_ENABLE(2),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_ENABLE(3),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_DATA(0),     0x00000000 },
+       { DC_COM_PIN_OUTPUT_DATA(1),     0x00000000 },
+       { DC_COM_PIN_OUTPUT_DATA(2),     0x00000000 },
+       { DC_COM_PIN_OUTPUT_DATA(3),     0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(4),   0x00210222 },
+       { DC_COM_PIN_OUTPUT_SELECT(5),   0x00002200 },
+       { DC_COM_PIN_OUTPUT_SELECT(6),   0x00020000 },
+};
+
+static const struct reg_entry rgb_disable[] = {
+       { DC_COM_PIN_OUTPUT_SELECT(6),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(5),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(4),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(3),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(2),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(1),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_SELECT(0),   0x00000000 },
+       { DC_COM_PIN_OUTPUT_DATA(3),     0xaaaaaaaa },
+       { DC_COM_PIN_OUTPUT_DATA(2),     0xaaaaaaaa },
+       { DC_COM_PIN_OUTPUT_DATA(1),     0xaaaaaaaa },
+       { DC_COM_PIN_OUTPUT_DATA(0),     0xaaaaaaaa },
+       { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
+       { DC_COM_PIN_OUTPUT_ENABLE(3),   0x55555555 },
+       { DC_COM_PIN_OUTPUT_ENABLE(2),   0x55555555 },
+       { DC_COM_PIN_OUTPUT_ENABLE(1),   0x55150005 },
+       { DC_COM_PIN_OUTPUT_ENABLE(0),   0x55555555 },
+};
+
+static void tegra_dc_write_regs(struct tegra_dc *dc,
+                               const struct reg_entry *table,
+                               unsigned int num)
+{
+       unsigned int i;
+
+       for (i = 0; i < num; i++)
+               tegra_dc_writel(dc, table[i].value, table[i].offset);
+}
+
+static int tegra_output_rgb_enable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+       tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
+
+       return 0;
+}
+
+static int tegra_output_rgb_disable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+
+       tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+
+       return 0;
+}
+
+static int tegra_output_rgb_setup_clock(struct tegra_output *output,
+                                       struct clk *clk, unsigned long pclk)
+{
+       struct tegra_rgb *rgb = to_rgb(output);
+
+       return clk_set_parent(clk, rgb->clk_parent);
+}
+
+static int tegra_output_rgb_check_mode(struct tegra_output *output,
+                                      struct drm_display_mode *mode,
+                                      enum drm_mode_status *status)
+{
+       /*
+        * FIXME: For now, always assume that the mode is okay. There are
+        * unresolved issues with clk_round_rate(), which doesn't always
+        * reliably report whether a frequency can be set or not.
+        */
+
+       *status = MODE_OK;
+
+       return 0;
+}
+
+static const struct tegra_output_ops rgb_ops = {
+       .enable = tegra_output_rgb_enable,
+       .disable = tegra_output_rgb_disable,
+       .setup_clock = tegra_output_rgb_setup_clock,
+       .check_mode = tegra_output_rgb_check_mode,
+};
+
+int tegra_dc_rgb_probe(struct tegra_dc *dc)
+{
+       struct device_node *np;
+       struct tegra_rgb *rgb;
+       int err;
+
+       np = of_get_child_by_name(dc->dev->of_node, "rgb");
+       if (!np || !of_device_is_available(np)) {
+               /* of_node_put() is safe on NULL, so this also covers the
+                * missing-node case */
+               of_node_put(np);
+               return -ENODEV;
+       }
+
+       rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
+       if (!rgb)
+               return -ENOMEM;
+
+       rgb->clk = devm_clk_get(dc->dev, NULL);
+       if (IS_ERR(rgb->clk)) {
+               dev_err(dc->dev, "failed to get clock\n");
+               return PTR_ERR(rgb->clk);
+       }
+
+       rgb->clk_parent = devm_clk_get(dc->dev, "parent");
+       if (IS_ERR(rgb->clk_parent)) {
+               dev_err(dc->dev, "failed to get parent clock\n");
+               return PTR_ERR(rgb->clk_parent);
+       }
+
+       err = clk_set_parent(rgb->clk, rgb->clk_parent);
+       if (err < 0) {
+               dev_err(dc->dev, "failed to set parent clock: %d\n", err);
+               return err;
+       }
+
+       rgb->output.dev = dc->dev;
+       rgb->output.of_node = np;
+
+       err = tegra_output_parse_dt(&rgb->output);
+       if (err < 0)
+               return err;
+
+       dc->rgb = &rgb->output;
+
+       return 0;
+}
+
+int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
+{
+       struct tegra_rgb *rgb;
+       int err;
+
+       if (!dc->rgb)
+               return -ENODEV;
+
+       /* only compute the container once dc->rgb is known to be valid */
+       rgb = to_rgb(dc->rgb);
+
+       rgb->output.type = TEGRA_OUTPUT_RGB;
+       rgb->output.ops = &rgb_ops;
+
+       err = tegra_output_init(dc->base.dev, &rgb->output);
+       if (err < 0) {
+               dev_err(dc->dev, "output setup failed: %d\n", err);
+               return err;
+       }
+
+       /*
+        * By default, outputs can be associated with each display controller.
+        * RGB outputs are an exception, so we make sure they can be attached
+        * to only their parent display controller.
+        */
+       rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
+
+       return 0;
+}
+
+int tegra_dc_rgb_exit(struct tegra_dc *dc)
+{
+       if (dc->rgb) {
+               int err;
+
+               err = tegra_output_disable(dc->rgb);
+               if (err < 0) {
+                       dev_err(dc->dev, "output failed to disable: %d\n", err);
+                       return err;
+               }
+
+               err = tegra_output_exit(dc->rgb);
+               if (err < 0) {
+                       dev_err(dc->dev, "output cleanup failed: %d\n", err);
+                       return err;
+               }
+
+               dc->rgb = NULL;
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5..a915133 100644
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 {
        if (interruptible) {
                return wait_event_interruptible(bo->event_queue,
-                                              atomic_read(&bo->reserved) == 0);
+                                              !ttm_bo_is_reserved(bo));
        } else {
-               wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+               wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
                return 0;
        }
 }
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
 
-       BUG_ON(!atomic_read(&bo->reserved));
+       BUG_ON(!ttm_bo_is_reserved(bo));
 
        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
        struct ttm_bo_global *glob = bo->glob;
        int ret;
 
-       while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+       while (unlikely(atomic_read(&bo->reserved) != 0)) {
                /**
                 * Deadlock avoidance for multi-bo reserving.
                 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                        return ret;
        }
 
+       /* the lru_lock is held by callers of this function, so testing and
+        * setting the reserved flag no longer needs an atomic cmpxchg */
+       atomic_set(&bo->reserved, 1);
        if (use_sequence) {
                /**
                 * Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
-                                 bool no_wait_reserve, bool no_wait_gpu)
+                                 bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-               ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+               ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
-                                        no_wait_reserve, no_wait_gpu, mem);
+                                        no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
@@ -487,40 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        ttm_bo_mem_put(bo, &bo->mem);
 
        atomic_set(&bo->reserved, 0);
+       wake_up_all(&bo->event_queue);
 
        /*
-        * Make processes trying to reserve really pick it up.
+        * Since the final reference to this bo may not be dropped by
+        * the current task we have to put a memory barrier here to make
+        * sure the changes done in this function are always visible.
+        *
+        * This function only needs protection against the final kref_put.
         */
-       smp_mb__after_atomic_dec();
-       wake_up_all(&bo->event_queue);
+       smp_mb__before_atomic_dec();
 }
 
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
-       struct ttm_bo_driver *driver;
+       struct ttm_bo_driver *driver = bdev->driver;
        void *sync_obj = NULL;
-       void *sync_obj_arg;
        int put_count;
        int ret;
 
+       spin_lock(&glob->lru_lock);
+       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
-       if (!bo->sync_obj) {
-
-               spin_lock(&glob->lru_lock);
-
-               /**
-                * Lock inversion between bo:reserve and bdev::fence_lock here,
-                * but that's OK, since we're only trylocking.
-                */
-
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-               if (unlikely(ret == -EBUSY))
-                       goto queue;
-
+       if (!ret && !bo->sync_obj) {
                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);
 
@@ -530,22 +524,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
                ttm_bo_list_ref_sub(bo, put_count, true);
 
                return;
-       } else {
-               spin_lock(&glob->lru_lock);
        }
-queue:
-       driver = bdev->driver;
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
-       sync_obj_arg = bo->sync_obj_arg;
+       spin_unlock(&bdev->fence_lock);
+
+       if (!ret) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
+       }
 
        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
-       spin_unlock(&bdev->fence_lock);
 
        if (sync_obj) {
-               driver->sync_obj_flush(sync_obj, sync_obj_arg);
+               driver->sync_obj_flush(sync_obj);
                driver->sync_obj_unref(&sync_obj);
        }
        schedule_delayed_work(&bdev->wq,
@@ -553,68 +547,84 @@ queue:
 }
 
 /**
- * function ttm_bo_cleanup_refs
+ * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, do nothing.
  *
+ * Must be called with lru_lock and reservation held, this function
+ * will drop both before returning.
+ *
  * @interruptible         Any sleeps should occur interruptibly.
- * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
  * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
  */
 
-static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
-                              bool interruptible,
-                              bool no_wait_reserve,
-                              bool no_wait_gpu)
+static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
+                                         bool interruptible,
+                                         bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
-       int ret = 0;
+       int ret;
 
-retry:
        spin_lock(&bdev->fence_lock);
-       ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-       spin_unlock(&bdev->fence_lock);
+       ret = ttm_bo_wait(bo, false, false, true);
 
-       if (unlikely(ret != 0))
-               return ret;
+       if (ret && !no_wait_gpu) {
+               void *sync_obj;
 
-retry_reserve:
-       spin_lock(&glob->lru_lock);
+               /*
+                * Take a reference to the fence and unreserve,
+                * at this point the buffer should be dead, so
+                * no new sync objects can be attached.
+                */
+               /* sync_obj_ref() takes the fence itself, not its location */
+               sync_obj = driver->sync_obj_ref(bo->sync_obj);
+               spin_unlock(&bdev->fence_lock);
 
-       if (unlikely(list_empty(&bo->ddestroy))) {
+               atomic_set(&bo->reserved, 0);
+               wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               return 0;
-       }
-
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-       if (unlikely(ret == -EBUSY)) {
-               spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait_reserve))
-                       ret = ttm_bo_wait_unreserved(bo, interruptible);
-               if (unlikely(ret != 0))
+               ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+               driver->sync_obj_unref(&sync_obj);
+               if (ret)
                        return ret;
 
-               goto retry_reserve;
-       }
+               /*
+                * remove sync_obj with ttm_bo_wait, the wait should be
+                * finished, and no new wait object should have been added.
+                */
+               spin_lock(&bdev->fence_lock);
+               ret = ttm_bo_wait(bo, false, false, true);
+               WARN_ON(ret);
+               spin_unlock(&bdev->fence_lock);
+               if (ret)
+                       return ret;
 
-       BUG_ON(ret != 0);
+               spin_lock(&glob->lru_lock);
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-       /**
-        * We can re-check for sync object without taking
-        * the bo::lock since setting the sync object requires
-        * also bo::reserved. A busy object at this point may
-        * be caused by another thread recently starting an accelerated
-        * eviction.
-        */
+               /*
+                * We raced, and lost, someone else holds the reservation now,
+                * and is probably busy in ttm_bo_cleanup_memtype_use.
+                *
+                * Even if it's not the case, because we finished waiting any
+                * delayed destruction would succeed, so just return success
+                * here.
+                */
+               if (ret) {
+                       spin_unlock(&glob->lru_lock);
+                       return 0;
+               }
+       } else
+               spin_unlock(&bdev->fence_lock);
 
-       if (unlikely(bo->sync_obj)) {
+       if (ret || unlikely(list_empty(&bo->ddestroy))) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
-               goto retry;
+               return ret;
        }
 
        put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
                        kref_get(&nentry->list_kref);
                }
 
-               spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-                                         !remove_all);
+               ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+               if (!ret)
+                       ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
+                                                            !remove_all);
+               else
+                       spin_unlock(&glob->lru_lock);
+
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;
 
@@ -697,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
+       write_lock(&bdev->vm_lock);
        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
@@ -708,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
-       write_lock(&bdev->vm_lock);
 }
 
 void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 {
        struct ttm_buffer_object *bo = *p_bo;
-       struct ttm_bo_device *bdev = bo->bdev;
 
        *p_bo = NULL;
-       write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
-       write_unlock(&bdev->vm_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
@@ -738,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-                       bool no_wait_reserve, bool no_wait_gpu)
+                       bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
@@ -756,7 +767,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                goto out;
        }
 
-       BUG_ON(!atomic_read(&bo->reserved));
+       BUG_ON(!ttm_bo_is_reserved(bo));
 
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
@@ -769,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-                               no_wait_reserve, no_wait_gpu);
+                               no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        }
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-                                    no_wait_reserve, no_wait_gpu);
+                                    no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
@@ -794,49 +805,33 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                                uint32_t mem_type,
-                               bool interruptible, bool no_wait_reserve,
+                               bool interruptible,
                                bool no_wait_gpu)
 {
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
-       int ret, put_count = 0;
+       int ret = -EBUSY, put_count;
 
-retry:
        spin_lock(&glob->lru_lock);
-       if (list_empty(&man->lru)) {
-               spin_unlock(&glob->lru_lock);
-               return -EBUSY;
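+       /* walk the LRU and evict the first buffer that can be trylocked;
+        * a failed reservation just means the buffer is in use elsewhere */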
+       list_for_each_entry(bo, &man->lru, lru) {
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               if (!ret)
+                       break;
        }
 
-       bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
-       kref_get(&bo->list_kref);
-
-       if (!list_empty(&bo->ddestroy)) {
+       if (ret) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_bo_cleanup_refs(bo, interruptible,
-                                         no_wait_reserve, no_wait_gpu);
-               kref_put(&bo->list_kref, ttm_bo_release_list);
-
                return ret;
        }
 
-       ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-
-       if (unlikely(ret == -EBUSY)) {
-               spin_unlock(&glob->lru_lock);
-               if (likely(!no_wait_reserve))
-                       ret = ttm_bo_wait_unreserved(bo, interruptible);
+       kref_get(&bo->list_kref);
 
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
+                                                    no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);
-
-               /**
-                * We *need* to retry after releasing the lru lock.
-                */
-
-               if (unlikely(ret != 0))
-                       return ret;
-               goto retry;
+               return ret;
        }
 
        put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +841,7 @@ retry:
 
        ttm_bo_list_ref_sub(bo, put_count, true);
 
-       ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
        ttm_bo_unreserve(bo);
 
        kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                        struct ttm_placement *placement,
                                        struct ttm_mem_reg *mem,
                                        bool interruptible,
-                                       bool no_wait_reserve,
                                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                        return ret;
                if (mem->mm_node)
                        break;
-               ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-                                               no_wait_reserve, no_wait_gpu);
+               ret = ttm_mem_evict_first(bdev, mem_type,
+                                         interruptible, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
@@ -950,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
                        struct ttm_mem_reg *mem,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                }
 
                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-                                               interruptible, no_wait_reserve, no_wait_gpu);
+                                               interruptible, no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
@@ -1054,26 +1048,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
-{
-       if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
-               return -EBUSY;
-
-       return wait_event_interruptible(bo->event_queue,
-                                       atomic_read(&bo->cpu_writers) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_wait_cpu);
-
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        int ret = 0;
        struct ttm_mem_reg mem;
        struct ttm_bo_device *bdev = bo->bdev;
 
-       BUG_ON(!atomic_read(&bo->reserved));
+       BUG_ON(!ttm_bo_is_reserved(bo));
 
        /*
         * FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
        /*
         * Determine where to move the buffer.
         */
-       ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_mem_space(bo, placement, &mem,
+                              interruptible, no_wait_gpu);
        if (ret)
                goto out_unlock;
-       ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
+       ret = ttm_bo_handle_move_mem(bo, &mem, false,
+                                    interruptible, no_wait_gpu);
 out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1111,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
-                       bool interruptible, bool no_wait_reserve,
+                       bool interruptible,
                        bool no_wait_gpu)
 {
        int ret;
 
-       BUG_ON(!atomic_read(&bo->reserved));
+       BUG_ON(!ttm_bo_is_reserved(bo));
        /* Check that range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
-               ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
+               ret = ttm_bo_move_buffer(bo, placement, interruptible,
+                                        no_wait_gpu);
                if (ret)
                        return ret;
        } else {
@@ -1179,7 +1166,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
-               unsigned long buffer_start,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
@@ -1200,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                return -ENOMEM;
        }
 
-       size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                pr_err("Illegal buffer object size\n");
@@ -1233,7 +1218,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
-       bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
@@ -1257,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
                        goto out_err;
        }
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (ret)
                goto out_err;
 
@@ -1306,7 +1290,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
-                       unsigned long buffer_start,
                        bool interruptible,
                        struct file *persistent_swap_storage,
                        struct ttm_buffer_object **p_bo)
@@ -1321,8 +1304,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 
        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
-                               buffer_start, interruptible,
-                         persistent_swap_storage, acc_size, NULL, NULL);
+                         interruptible, persistent_swap_storage, acc_size,
+                         NULL, NULL);
        if (likely(ret == 0))
                *p_bo = bo;
 
@@ -1344,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
-               ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
+               ret = ttm_mem_evict_first(bdev, mem_type, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
@@ -1577,7 +1560,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
                goto out_no_addr_mm;
 
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
-       bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
@@ -1721,7 +1703,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
        struct ttm_bo_driver *driver = bo->bdev->driver;
        struct ttm_bo_device *bdev = bo->bdev;
        void *sync_obj;
-       void *sync_obj_arg;
        int ret = 0;
 
        if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1710,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
        while (bo->sync_obj) {
 
-               if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+               if (driver->sync_obj_signaled(bo->sync_obj)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1724,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                        return -EBUSY;
 
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
-               sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bdev->fence_lock);
-               ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
+               ret = driver->sync_obj_wait(sync_obj,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1733,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
                        return ret;
                }
                spin_lock(&bdev->fence_lock);
-               if (likely(bo->sync_obj == sync_obj &&
-                          bo->sync_obj_arg == sync_obj_arg)) {
+               if (likely(bo->sync_obj == sync_obj)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1776,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
-       if (atomic_dec_and_test(&bo->cpu_writers))
-               wake_up_all(&bo->event_queue);
+       atomic_dec(&bo->cpu_writers);
 }
 EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
@@ -1817,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
        spin_lock(&glob->lru_lock);
-       while (ret == -EBUSY) {
-               if (unlikely(list_empty(&glob->swap_lru))) {
-                       spin_unlock(&glob->lru_lock);
-                       return -EBUSY;
-               }
-
-               bo = list_first_entry(&glob->swap_lru,
-                                     struct ttm_buffer_object, swap);
-               kref_get(&bo->list_kref);
+       list_for_each_entry(bo, &glob->swap_lru, swap) {
+               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+               if (!ret)
+                       break;
+       }
 
-               if (!list_empty(&bo->ddestroy)) {
-                       spin_unlock(&glob->lru_lock);
-                       (void) ttm_bo_cleanup_refs(bo, false, false, false);
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&glob->lru_lock);
-                       continue;
-               }
+       if (ret) {
+               spin_unlock(&glob->lru_lock);
+               return ret;
+       }
 
-               /**
-                * Reserve buffer. Since we unlock while sleeping, we need
-                * to re-check that nobody removed us from the swap-list while
-                * we slept.
-                */
+       kref_get(&bo->list_kref);
 
-               ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
-               if (unlikely(ret == -EBUSY)) {
-                       spin_unlock(&glob->lru_lock);
-                       ttm_bo_wait_unreserved(bo, false);
-                       kref_put(&bo->list_kref, ttm_bo_release_list);
-                       spin_lock(&glob->lru_lock);
-               }
+       if (!list_empty(&bo->ddestroy)) {
+               ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
+               kref_put(&bo->list_kref, ttm_bo_release_list);
+               return ret;
        }
 
-       BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);
 
@@ -1876,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                evict_mem.mem_type = TTM_PL_SYSTEM;
 
                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-                                            false, false, false);
+                                            false, false);
                if (unlikely(ret != 0))
                        goto out;
        }
index 2026060..9e9c5d2 100644 (file)
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                   bool evict, bool no_wait_reserve,
+                   bool evict,
                    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
        struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait_reserve, bool no_wait_gpu,
+                      bool evict, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -611,8 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              void *sync_obj,
-                             void *sync_obj_arg,
-                             bool evict, bool no_wait_reserve,
+                             bool evict,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
@@ -630,7 +629,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                bo->sync_obj = NULL;
        }
        bo->sync_obj = driver->sync_obj_ref(sync_obj);
-       bo->sync_obj_arg = sync_obj_arg;
        if (evict) {
                ret = ttm_bo_wait(bo, false, false, false);
                spin_unlock(&bdev->fence_lock);
index 3ba72db..74705f3 100644 (file)
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
        read_lock(&bdev->vm_lock);
        bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
                                 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-       if (likely(bo != NULL))
-               ttm_bo_reference(bo);
+       if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
+               bo = NULL;
        read_unlock(&bdev->vm_lock);
 
        if (unlikely(bo == NULL)) {
index 1937069..cd9e452 100644 (file)
@@ -185,10 +185,7 @@ retry_this_bo:
                        ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
-                       ret = ttm_bo_wait_cpu(bo, false);
-                       if (ret)
-                               return ret;
-                       goto retry;
+                       return -EBUSY;
                }
        }
 
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
        driver = bdev->driver;
        glob = bo->glob;
 
-       spin_lock(&bdev->fence_lock);
        spin_lock(&glob->lru_lock);
+       spin_lock(&bdev->fence_lock);
 
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
-               bo->sync_obj_arg = entry->new_sync_obj_arg;
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
-       spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);
+       spin_unlock(&glob->lru_lock);
 
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
index 479c6b0..dbc2def 100644 (file)
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
-       init_waitqueue_head(&glob->queue);
        ret = kobject_init_and_add(
                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
        if (unlikely(ret != 0)) {
index c785787..58a5f32 100644 (file)
@@ -80,7 +80,7 @@ struct ttm_object_file {
  */
 
 struct ttm_object_device {
-       rwlock_t object_lock;
+       spinlock_t object_lock;
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
-       write_lock(&tdev->object_lock);
        kref_init(&base->refcount);
-       ret = drm_ht_just_insert_please(&tdev->object_hash,
-                                       &base->hash,
-                                       (unsigned long)base, 31, 0, 0);
-       write_unlock(&tdev->object_lock);
+       spin_lock(&tdev->object_lock);
+       ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+                                           &base->hash,
+                                           (unsigned long)base, 31, 0, 0);
+       spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;
 
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
 
        return 0;
 out_err1:
-       (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+       spin_lock(&tdev->object_lock);
+       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+       spin_unlock(&tdev->object_lock);
 out_err0:
        return ret;
 }
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;
 
-       (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
-       write_unlock(&tdev->object_lock);
+       spin_lock(&tdev->object_lock);
+       (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
+       spin_unlock(&tdev->object_lock);
+
+       /*
+        * Note: We don't use synchronize_rcu() here because it's far
+        * too slow. It's up to the user to free the object using
+        * call_rcu() or ttm_base_object_kfree().
+        */
+
        if (base->refcount_release) {
                ttm_object_file_unref(&base->tfile);
                base->refcount_release(&base);
        }
-       write_lock(&tdev->object_lock);
 }
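The comment above moves responsibility for RCU-deferred freeing into the
driver's refcount_release callback. A minimal sketch of such a callback,
assuming ttm_base_object_kfree() wraps kfree_rcu() on an RCU head embedded
in the base object (my_object and my_release are placeholder names):

        static void my_release(struct ttm_base_object **p_base)
        {
                struct my_object *obj =
                        container_of(*p_base, struct my_object, base);

                *p_base = NULL;
                /* Defer the kfree() past an RCU grace period so concurrent
                 * drm_ht_find_item_rcu() lookups never touch freed memory. */
                ttm_base_object_kfree(obj, base);
        }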
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
 {
        struct ttm_base_object *base = *p_base;
-       struct ttm_object_device *tdev = base->tfile->tdev;
 
        *p_base = NULL;
 
-       /*
-        * Need to take the lock here to avoid racing with
-        * users trying to look up the object.
-        */
-
-       write_lock(&tdev->object_lock);
        kref_put(&base->refcount, ttm_release_base);
-       write_unlock(&tdev->object_lock);
 }
 EXPORT_SYMBOL(ttm_base_object_unref);
 
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
        struct drm_hash_item *hash;
        int ret;
 
-       read_lock(&tdev->object_lock);
-       ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+       rcu_read_lock();
+       ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
 
        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
-               kref_get(&base->refcount);
+               ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
        }
-       read_unlock(&tdev->object_lock);
+       rcu_read_unlock();
 
        if (unlikely(ret != 0))
                return NULL;
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
                return NULL;
 
        tdev->mem_glob = mem_glob;
-       rwlock_init(&tdev->object_lock);
+       spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
 
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        *p_tdev = NULL;
 
-       write_lock(&tdev->object_lock);
+       spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
-       write_unlock(&tdev->object_lock);
+       spin_unlock(&tdev->object_lock);
 
        kfree(tdev);
 }
index b3b2ced..512f44a 100644 (file)
@@ -84,7 +84,8 @@ udl_detect(struct drm_connector *connector, bool force)
        return connector_status_connected;
 }
 
-struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
+static struct drm_encoder *
+udl_best_single_encoder(struct drm_connector *connector)
 {
        int enc_id = connector->encoder_ids[0];
        struct drm_mode_object *obj;
@@ -97,8 +98,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
        return encoder;
 }
 
-int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
-                              uint64_t val)
+static int udl_connector_set_property(struct drm_connector *connector,
+                                     struct drm_property *property,
+                                     uint64_t val)
 {
        return 0;
 }
@@ -110,13 +112,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
        kfree(connector);
 }
 
-struct drm_connector_helper_funcs udl_connector_helper_funcs = {
+static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
        .get_modes = udl_get_modes,
        .mode_valid = udl_mode_valid,
        .best_encoder = udl_best_single_encoder,
 };
 
-struct drm_connector_funcs udl_connector_funcs = {
+static struct drm_connector_funcs udl_connector_funcs = {
        .dpms = drm_helper_connector_dpms,
        .detect = udl_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
        drm_sysfs_connector_add(connector);
        drm_mode_connector_attach_encoder(connector, encoder);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
        return 0;
index 586869c..2cc6cd9 100644 (file)
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
+           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+           vmwgfx_surface.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644 (file)
index 0000000..8369c3b
--- /dev/null
@@ -0,0 +1,909 @@
+/**************************************************************************
+ *
+ * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifdef __KERNEL__
+
+#include <drm/vmwgfx_drm.h>
+#define surf_size_struct struct drm_vmw_size
+
+#else /* __KERNEL__ */
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
+#endif /* ARRAY_SIZE */
+
+#define DIV_ROUND_UP(x, y)  (((x) + (y) - 1) / (y))
+#define max_t(type, x, y)  ((x) > (y) ? (x) : (y))
+#define surf_size_struct SVGA3dSize
+#define u32 uint32
+
+#endif /* __KERNEL__ */
+
+#include "svga3d_reg.h"
+
+/*
+ * enum svga3d_block_desc describes the active data channels in a block.
+ *
+ * There can be at most four active channels in a block:
+ *    1. Blue and bump U are stored in the first channel.
+ *    2. Green, bump V and stencil are stored in the second channel.
+ *    3. Red, bump W, luminance and depth are stored in the third channel.
+ *    4. Alpha and bump Q are stored in the fourth channel.
+ *
+ * Block channels can be used to store compressed and buffer data:
+ *    1. For compressed formats, only the data channel is used and its size
+ *       is equal to that of a singular block in the compression scheme.
+ *    2. For buffer formats, only the data channel is used and its size is
+ *       exactly one byte in length.
+ *    3. In each case the bit depth represents the size of a singular block.
+ *
+ * Note: Compressed and IEEE formats do not use the bitMask structure.
+ */
+
+enum svga3d_block_desc {
+       SVGA3DBLOCKDESC_NONE        = 0,         /* No channels are active */
+       SVGA3DBLOCKDESC_BLUE        = 1 << 0,    /* Block with blue channel
+                                                   data */
+       SVGA3DBLOCKDESC_U           = 1 << 0,    /* Block with bump U channel
+                                                   data */
+       SVGA3DBLOCKDESC_UV_VIDEO    = 1 << 7,    /* Block with alternating video
+                                                   U and V */
+       SVGA3DBLOCKDESC_GREEN       = 1 << 1,    /* Block with green channel
+                                                   data */
+       SVGA3DBLOCKDESC_V           = 1 << 1,    /* Block with bump V channel
+                                                   data */
+       SVGA3DBLOCKDESC_STENCIL     = 1 << 1,    /* Block with a stencil
+                                                   channel */
+       SVGA3DBLOCKDESC_RED         = 1 << 2,    /* Block with red channel
+                                                   data */
+       SVGA3DBLOCKDESC_W           = 1 << 2,    /* Block with bump W channel
+                                                   data */
+       SVGA3DBLOCKDESC_LUMINANCE   = 1 << 2,    /* Block with luminance channel
+                                                   data */
+       SVGA3DBLOCKDESC_Y           = 1 << 2,    /* Block with video luminance
+                                                   data */
+       SVGA3DBLOCKDESC_DEPTH       = 1 << 2,    /* Block with depth channel */
+       SVGA3DBLOCKDESC_ALPHA       = 1 << 3,    /* Block with an alpha
+                                                   channel */
+       SVGA3DBLOCKDESC_Q           = 1 << 3,    /* Block with bump Q channel
+                                                   data */
+       SVGA3DBLOCKDESC_BUFFER      = 1 << 4,    /* Block stores 1 byte of
+                                                   data */
+       SVGA3DBLOCKDESC_COMPRESSED  = 1 << 5,    /* Block stores n bytes of
+                                                   data depending on the
+                                                   compression method used */
+       SVGA3DBLOCKDESC_IEEE_FP     = 1 << 6,    /* Block stores data in an IEEE
+                                                   floating point
+                                                   representation in
+                                                   all channels */
+       SVGA3DBLOCKDESC_PLANAR_YUV  = 1 << 8,    /* Three separate blocks store
+                                                   data. */
+       SVGA3DBLOCKDESC_U_VIDEO     = 1 << 9,    /* Block with U video data */
+       SVGA3DBLOCKDESC_V_VIDEO     = 1 << 10,   /* Block with V video data */
+       SVGA3DBLOCKDESC_EXP         = 1 << 11,   /* Shared exponent */
+       SVGA3DBLOCKDESC_SRGB        = 1 << 12,   /* Data is in sRGB format */
+       SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13,   /* 2 planes of Y, UV,
+                                                   e.g., NV12. */
+       SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14,   /* 3 planes of separate
+                                                   Y, U, V, e.g., YV12. */
+
+       SVGA3DBLOCKDESC_RG         = SVGA3DBLOCKDESC_RED |
+       SVGA3DBLOCKDESC_GREEN,
+       SVGA3DBLOCKDESC_RGB        = SVGA3DBLOCKDESC_RG |
+       SVGA3DBLOCKDESC_BLUE,
+       SVGA3DBLOCKDESC_RGB_SRGB   = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_RGBA       = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_RGBA_SRGB  = SVGA3DBLOCKDESC_RGBA |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_UV         = SVGA3DBLOCKDESC_U |
+       SVGA3DBLOCKDESC_V,
+       SVGA3DBLOCKDESC_UVL        = SVGA3DBLOCKDESC_UV |
+       SVGA3DBLOCKDESC_LUMINANCE,
+       SVGA3DBLOCKDESC_UVW        = SVGA3DBLOCKDESC_UV |
+       SVGA3DBLOCKDESC_W,
+       SVGA3DBLOCKDESC_UVWA       = SVGA3DBLOCKDESC_UVW |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_UVWQ       = SVGA3DBLOCKDESC_U |
+       SVGA3DBLOCKDESC_V |
+       SVGA3DBLOCKDESC_W |
+       SVGA3DBLOCKDESC_Q,
+       SVGA3DBLOCKDESC_LA         = SVGA3DBLOCKDESC_LUMINANCE |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_R_FP       = SVGA3DBLOCKDESC_RED |
+       SVGA3DBLOCKDESC_IEEE_FP,
+       SVGA3DBLOCKDESC_RG_FP      = SVGA3DBLOCKDESC_R_FP |
+       SVGA3DBLOCKDESC_GREEN,
+       SVGA3DBLOCKDESC_RGB_FP     = SVGA3DBLOCKDESC_RG_FP |
+       SVGA3DBLOCKDESC_BLUE,
+       SVGA3DBLOCKDESC_RGBA_FP    = SVGA3DBLOCKDESC_RGB_FP |
+       SVGA3DBLOCKDESC_ALPHA,
+       SVGA3DBLOCKDESC_DS         = SVGA3DBLOCKDESC_DEPTH |
+       SVGA3DBLOCKDESC_STENCIL,
+       SVGA3DBLOCKDESC_YUV        = SVGA3DBLOCKDESC_UV_VIDEO |
+       SVGA3DBLOCKDESC_Y,
+       SVGA3DBLOCKDESC_AYUV       = SVGA3DBLOCKDESC_ALPHA |
+       SVGA3DBLOCKDESC_Y |
+       SVGA3DBLOCKDESC_U_VIDEO |
+       SVGA3DBLOCKDESC_V_VIDEO,
+       SVGA3DBLOCKDESC_RGBE       = SVGA3DBLOCKDESC_RGB |
+       SVGA3DBLOCKDESC_EXP,
+       SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
+       SVGA3DBLOCKDESC_SRGB,
+       SVGA3DBLOCKDESC_NV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+       SVGA3DBLOCKDESC_2PLANAR_YUV,
+       SVGA3DBLOCKDESC_YV12       = SVGA3DBLOCKDESC_PLANAR_YUV |
+       SVGA3DBLOCKDESC_3PLANAR_YUV,
+};
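The composite values above are plain bitwise ORs of the single-channel
flags, so testing a descriptor for an individual channel is a mask
operation. A minimal sketch (the helper name is hypothetical):

        static inline bool svga3d_block_has(enum svga3d_block_desc desc,
                                            enum svga3d_block_desc channel)
        {
                /* E.g. RGBA & ALPHA != 0, while RGB & ALPHA == 0. */
                return (desc & channel) != 0;
        }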
+
+/*
+ * SVGA3dSurfaceDesc describes the actual pixel data.
+ *
+ * This structure provides the following information:
+ *    1. Block description.
+ *    2. Dimensions of a block in the surface.
+ *    3. Size of block in bytes.
+ *    4. Bit depth of the pixel data.
+ *    5. Channel bit depths and masks (if applicable).
+ */
+#define SVGA3D_CHANNEL_DEF(type)               \
+       struct {                                \
+               union {                         \
+                       type blue;              \
+                       type u;                 \
+                       type uv_video;          \
+                       type u_video;           \
+               };                              \
+               union {                         \
+                       type green;             \
+                       type v;                 \
+                       type stencil;           \
+                       type v_video;           \
+               };                              \
+               union {                         \
+                       type red;               \
+                       type w;                 \
+                       type luminance;         \
+                       type y;                 \
+                       type depth;             \
+                       type data;              \
+               };                              \
+               union {                         \
+                       type alpha;             \
+                       type q;                 \
+                       type exp;               \
+               };                              \
+       }
+
+struct svga3d_surface_desc {
+       enum svga3d_block_desc block_desc;
+       surf_size_struct block_size;
+       u32 bytes_per_block;
+       u32 pitch_bytes_per_block;
+
+       struct {
+               u32 total;
+               SVGA3D_CHANNEL_DEF(uint8);
+       } bit_depth;
+
+       struct {
+               SVGA3D_CHANNEL_DEF(uint8);
+       } bit_offset;
+};
+
+static const struct svga3d_surface_desc svga3d_surface_descs[] = {
+       {SVGA3DBLOCKDESC_NONE,
+        {1, 1, 1},  0, 0, {0, {{0}, {0}, {0}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_FORMAT_INVALID */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_X8R8G8B8 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_A8R8G8B8 */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  2, 2, {16, {{5}, {6}, {5}, {0} } },
+        {{{0}, {5}, {11}, {0} } } },    /* SVGA3D_R5G6B5 */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  2, 2, {15, {{5}, {5}, {5}, {0} } },
+        {{{0}, {5}, {10}, {0} } } },    /* SVGA3D_X1R5G5B5 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  2, 2, {16, {{5}, {5}, {5}, {1} } },
+        {{{0}, {5}, {10}, {15} } } },   /* SVGA3D_A1R5G5B5 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  2, 2, {16, {{4}, {4}, {4}, {4} } },
+        {{{0}, {4}, {8}, {12} } } },    /* SVGA3D_A4R4G4B4 */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D32 */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_D16 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  2, 2, {16, {{0}, {1}, {15}, {0} } },
+        {{{0}, {15}, {0}, {0} } } },    /* SVGA3D_Z_D15S1 */
+
+       {SVGA3DBLOCKDESC_LUMINANCE,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE8 */
+
+       {SVGA3DBLOCKDESC_LA,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {4}, {4} } },
+        {{{0}, {0}, {0}, {4} } } },     /* SVGA3D_LUMINANCE4_ALPHA4 */
+
+       {SVGA3DBLOCKDESC_LUMINANCE,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_LUMINANCE16 */
+
+       {SVGA3DBLOCKDESC_LA,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+        {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_LUMINANCE8_ALPHA8 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT1 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT2 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT3 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT4 */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_DXT5 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {8}, {8} } },
+        {{{0}, {0}, {0}, {8} } } },     /* SVGA3D_BUMPU8V8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  2, 2, {16, {{5}, {5}, {6}, {0} } },
+        {{{11}, {6}, {0}, {0} } } },    /* SVGA3D_BUMPL6V5U5 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPX8L8V8U8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  3, 3, {24, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_BUMPL8V8U8 */
+
+       {SVGA3DBLOCKDESC_RGBA_FP,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_ARGB_S10E5 */
+
+       {SVGA3DBLOCKDESC_RGBA_FP,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_ARGB_S23E8 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2R10G10B10 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_V8U8 */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{24}, {16}, {8}, {0} } } },   /* SVGA3D_Q8W8V8U8 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{8}, {8}, {0}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_CxV8U8 */
+
+       {SVGA3DBLOCKDESC_UVL,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{16}, {8}, {0}, {0} } } },    /* SVGA3D_X8L8V8U8 */
+
+       {SVGA3DBLOCKDESC_UVWA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_A2W10V10U10 */
+
+       {SVGA3DBLOCKDESC_ALPHA,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {0}, {8} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_ALPHA8 */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S10E5 */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R_S23E8 */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_RG_S10E5 */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_RG_S23E8 */
+
+       {SVGA3DBLOCKDESC_BUFFER,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BUFFER */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24X8 */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  4, 4, {32, {{16}, {16}, {0}, {0} } },
+        {{{16}, {0}, {0}, {0} } } },    /* SVGA3D_V16U16 */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {0}, {16}, {0} } } },    /* SVGA3D_G16R16 */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_A16B16G16R16 */
+
+       {SVGA3DBLOCKDESC_YUV,
+        {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+        {{{0}, {0}, {8}, {0} } } },     /* SVGA3D_UYVY */
+
+       {SVGA3DBLOCKDESC_YUV,
+        {1, 1, 1},  2, 2, {16, {{8}, {0}, {8}, {0} } },
+        {{{8}, {0}, {0}, {0} } } },     /* SVGA3D_YUY2 */
+
+       {SVGA3DBLOCKDESC_NV12,
+        {2, 2, 1},  6, 2, {48, {{0}, {0}, {48}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_NV12 */
+
+       {SVGA3DBLOCKDESC_AYUV,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_AYUV */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_UINT */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  16, 16, {128, {{32}, {32}, {32}, {32} } },
+        {{{64}, {32}, {0}, {96} } } },  /* SVGA3D_R32G32B32A32_SINT */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGB_FP,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_FLOAT */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_UINT */
+
+       {SVGA3DBLOCKDESC_UVW,
+        {1, 1, 1},  12, 12, {96, {{32}, {32}, {32}, {0} } },
+        {{{64}, {32}, {0}, {0} } } },   /* SVGA3D_R32G32B32_SINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_UINT */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SNORM */
+
+       {SVGA3DBLOCKDESC_UVWQ,
+        {1, 1, 1},  8, 8, {64, {{16}, {16}, {16}, {16} } },
+        {{{32}, {16}, {0}, {48} } } },  /* SVGA3D_R16G16B16A16_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  8, 8, {64, {{0}, {32}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G32_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_R32G8X24_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {32}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_D32_FLOAT_S8X24_UINT */
+
+       {SVGA3DBLOCKDESC_R_FP,
+        {1, 1, 1},  8, 8, {64, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },    /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
+
+       {SVGA3DBLOCKDESC_GREEN,
+        {1, 1, 1},  8, 8, {64, {{0}, {8}, {0}, {0} } },
+        {{{0}, {32}, {0}, {0} } } },    /* SVGA3D_X32_TYPELESS_G8X24_UINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10A2_UINT */
+
+       {SVGA3DBLOCKDESC_RGB_FP,
+        {1, 1, 1},  4, 4, {32, {{10}, {11}, {11}, {0} } },
+        {{{0}, {10}, {21}, {0} } } },  /* SVGA3D_R11G11B10_FLOAT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBA_SRGB,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_UINT */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{16}, {8}, {0}, {24} } } },   /* SVGA3D_R8G8B8A8_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG_FP,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  4, 4, {32, {{0}, {16}, {16}, {0} } },
+        {{{0}, {16}, {0}, {0} } } },    /* SVGA3D_R16G16_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_D32_FLOAT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_UINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {32}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R32_SINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_R24G8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_D24_UNORM_S8_UINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  4, 4, {32, {{0}, {0}, {24}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R24_UNORM_X8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_GREEN,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {0}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_X24_TYPELESS_G8_UINT */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UNORM */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_UINT */
+
+       {SVGA3DBLOCKDESC_UV,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UNORM */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_UINT */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SNORM */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R16_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UNORM */
+
+       {SVGA3DBLOCKDESC_RED,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_UINT */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SNORM */
+
+       {SVGA3DBLOCKDESC_U,
+        {1, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R8_SINT */
+
+       {SVGA3DBLOCKDESC_RED,
+        {8, 1, 1},  1, 1, {8, {{0}, {0}, {8}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_R1_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBE,
+        {1, 1, 1},  4, 4, {32, {{9}, {9}, {9}, {5} } },
+        {{{18}, {9}, {0}, {27} } } },   /* SVGA3D_R9G9B9E5_SHAREDEXP */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_R8G8_B8G8_UNORM */
+
+       {SVGA3DBLOCKDESC_RG,
+        {1, 1, 1},  2, 2, {16, {{0}, {8}, {8}, {0} } },
+        {{{0}, {8}, {0}, {0} } } },     /* SVGA3D_G8R8_G8B8_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC1_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC2_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC3_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  8, 8, {64, {{0}, {0}, {64}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC4_SNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_TYPELESS */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_UNORM */
+
+       {SVGA3DBLOCKDESC_COMPRESSED,
+        {4, 4, 1},  16, 16, {128, {{0}, {0}, {128}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_BC5_SNORM */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{10}, {10}, {10}, {2} } },
+        {{{0}, {10}, {20}, {30} } } },  /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
+
+       {SVGA3DBLOCKDESC_RGBA,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGBA_SRGB,
+        {1, 1, 1},  4, 4, {32, {{8}, {8}, {8}, {8} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8A8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_RGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_TYPELESS */
+
+       {SVGA3DBLOCKDESC_RGB_SRGB,
+        {1, 1, 1},  4, 4, {24, {{8}, {8}, {8}, {0} } },
+        {{{0}, {8}, {16}, {24} } } },   /* SVGA3D_B8G8R8X8_UNORM_SRGB */
+
+       {SVGA3DBLOCKDESC_DEPTH,
+        {1, 1, 1},  2, 2, {16, {{0}, {0}, {16}, {0} } },
+        {{{0}, {0}, {0}, {0} } } },     /* SVGA3D_Z_DF16 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_DF24 */
+
+       {SVGA3DBLOCKDESC_DS,
+        {1, 1, 1},  4, 4, {32, {{0}, {8}, {24}, {0} } },
+        {{{0}, {24}, {0}, {0} } } },    /* SVGA3D_Z_D24S8_INT */
+};
+
+static inline u32 clamped_umul32(u32 a, u32 b)
+{
+       uint64_t tmp = (uint64_t) a*b;
+       return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+}
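clamped_umul32() saturates at 0xffffffff instead of wrapping, which keeps
the size computations below from silently under-allocating on overflow.
A quick demonstration (the wrapper function is illustrative only):

        static inline void clamped_umul32_demo(void)
        {
                u32 sat  = clamped_umul32(0x10000, 0x10000); /* 0xffffffff */
                u32 wrap = 0x10000u * 0x10000u;              /* wraps to 0 */

                (void)sat;
                (void)wrap;
        }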
+
+static inline const struct svga3d_surface_desc *
+svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
+{
+       if (format < ARRAY_SIZE(svga3d_surface_descs))
+               return &svga3d_surface_descs[format];
+
+       return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
+}
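Out-of-range format values fall back to the SVGA3D_FORMAT_INVALID entry
(index 0, an all-zero descriptor), so callers can index the table blindly.
A usage sketch, assuming the SVGA3D_A8R8G8B8 format id from svga3d_reg.h
(the demo helper name is illustrative):

        static inline u32 a8r8g8b8_bpp_demo(void)
        {
                const struct svga3d_surface_desc *desc =
                        svga3dsurface_get_desc(SVGA3D_A8R8G8B8);

                /* 1x1x1 blocks of 4 bytes, so bit_depth.total == 32. */
                return desc->bit_depth.total;
        }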
+
+/*
+ *----------------------------------------------------------------------
+ *
+ * svga3dsurface_get_mip_size --
+ *
+ *      Given a base level size and the mip level, compute the size of
+ *      the mip level.
+ *
+ * Results:
+ *      See above.
+ *
+ * Side effects:
+ *      None.
+ *
+ *----------------------------------------------------------------------
+ */
+
+static inline surf_size_struct
+svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
+{
+       surf_size_struct size;
+
+       size.width = max_t(u32, base_level.width >> mip_level, 1);
+       size.height = max_t(u32, base_level.height >> mip_level, 1);
+       size.depth = max_t(u32, base_level.depth >> mip_level, 1);
+       return size;
+}
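Each dimension is halved per mip level and clamped to 1, so the function
is safe to call past the end of the natural mip chain. For example
(illustrative helper):

        static inline void mip_size_demo(void)
        {
                surf_size_struct base = { .width = 256, .height = 128, .depth = 1 };
                surf_size_struct m2 = svga3dsurface_get_mip_size(base, 2); /* 64x32x1 */
                surf_size_struct m8 = svga3dsurface_get_mip_size(base, 8); /* 1x1x1 */

                (void)m2;
                (void)m8;
        }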
+
+static inline void
+svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
+                                const surf_size_struct *pixel_size,
+                                surf_size_struct *block_size)
+{
+       block_size->width = DIV_ROUND_UP(pixel_size->width,
+                                        desc->block_size.width);
+       block_size->height = DIV_ROUND_UP(pixel_size->height,
+                                         desc->block_size.height);
+       block_size->depth = DIV_ROUND_UP(pixel_size->depth,
+                                        desc->block_size.depth);
+}
+
+static inline bool
+svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
+{
+       return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
+}
+
+static inline u32
+svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
+                             const surf_size_struct *size)
+{
+       u32 pitch;
+       surf_size_struct blocks;
+
+       svga3dsurface_get_size_in_blocks(desc, size, &blocks);
+
+       pitch = blocks.width * desc->pitch_bytes_per_block;
+
+       return pitch;
+}
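The pitch is computed in whole blocks, so compressed formats get block-row
pitches rather than pixel-row pitches. E.g. SVGA3D_DXT1 (4x4 blocks, 8
pitch bytes per block) at 64 pixels wide gives 16 * 8 = 128 bytes (demo
helper name illustrative):

        static inline u32 dxt1_pitch_demo(void)
        {
                const struct svga3d_surface_desc *desc =
                        svga3dsurface_get_desc(SVGA3D_DXT1);
                surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

                return svga3dsurface_calculate_pitch(desc, &size); /* 128 */
        }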
+
+/*
+ *-----------------------------------------------------------------------------
+ *
+ * svga3dsurface_get_image_buffer_size --
+ *
+ *      Return the number of bytes of buffer space required to store
+ *      one image of a surface, optionally using the specified pitch.
+ *
+ *      If pitch is zero, it is assumed that rows are tightly packed.
+ *
+ *      This function is overflow-safe. If the result would have
+ *      overflowed, instead we return MAX_UINT32.
+ *
+ * Results:
+ *      Byte count.
+ *
+ * Side effects:
+ *      None.
+ *
+ *-----------------------------------------------------------------------------
+ */
+
+static inline u32
+svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
+                                   const surf_size_struct *size,
+                                   u32 pitch)
+{
+       surf_size_struct image_blocks;
+       u32 slice_size, total_size;
+
+       svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
+
+       if (svga3dsurface_is_planar_surface(desc)) {
+               total_size = clamped_umul32(image_blocks.width,
+                                           image_blocks.height);
+               total_size = clamped_umul32(total_size, image_blocks.depth);
+               total_size = clamped_umul32(total_size, desc->bytes_per_block);
+               return total_size;
+       }
+
+       if (pitch == 0)
+               pitch = svga3dsurface_calculate_pitch(desc, size);
+
+       slice_size = clamped_umul32(image_blocks.height, pitch);
+       total_size = clamped_umul32(slice_size, image_blocks.depth);
+
+       return total_size;
+}
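Continuing the DXT1 example: a 64x64 image spans 16x16 blocks, pitch 0
defaults to the 128-byte pitch computed above, one slice is
16 * 128 = 2048 bytes, and depth 1 gives 2048 bytes total:

        static inline u32 dxt1_64x64_size_demo(void)
        {
                const struct svga3d_surface_desc *desc =
                        svga3dsurface_get_desc(SVGA3D_DXT1);
                surf_size_struct size = { .width = 64, .height = 64, .depth = 1 };

                return svga3dsurface_get_image_buffer_size(desc, &size, 0); /* 2048 */
        }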
+
+static inline u32
+svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
+                                 surf_size_struct base_level_size,
+                                 u32 num_mip_levels,
+                                 bool cubemap)
+{
+       const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+       u32 total_size = 0;
+       u32 mip;
+
+       for (mip = 0; mip < num_mip_levels; mip++) {
+               surf_size_struct size =
+                       svga3dsurface_get_mip_size(base_level_size, mip);
+               total_size += svga3dsurface_get_image_buffer_size(desc,
+                                                                 &size, 0);
+       }
+
+       if (cubemap)
+               total_size *= SVGA3D_MAX_SURFACE_FACES;
+
+       return total_size;
+}
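For example, a 4x4 SVGA3D_A8R8G8B8 surface with three mip levels
serializes to 64 + 16 + 4 = 84 bytes; with cubemap set that is multiplied
by SVGA3D_MAX_SURFACE_FACES (6), giving 504 bytes (sketch):

        static inline u32 serialized_size_demo(void)
        {
                surf_size_struct base = { .width = 4, .height = 4, .depth = 1 };

                return svga3dsurface_get_serialized_size(SVGA3D_A8R8G8B8,
                                                         base, 3, false); /* 84 */
        }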
+
+
+/**
+ * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
+ * in an image (or volume).
+ *
+ * @format: The surface format, used to look up the block layout.
+ * @width: The image width in pixels.
+ * @height: The image height in pixels.
+ * @x: Pixel x coordinate.
+ * @y: Pixel y coordinate.
+ * @z: Pixel z coordinate (slice index for volumes).
+ */
+static inline u32
+svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
+                              u32 width, u32 height,
+                              u32 x, u32 y, u32 z)
+{
+       const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
+       const u32 bw = desc->block_size.width, bh = desc->block_size.height;
+       const u32 bd = desc->block_size.depth;
+       const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
+       const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
+       const u32 offset = (z / bd * imgstride +
+                           y / bh * rowstride +
+                           x / bw * desc->bytes_per_block);
+       return offset;
+}
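For uncompressed formats the block size is 1x1x1 and this reduces to the
familiar z * imgstride + y * rowstride + x * bytes_per_pixel. E.g. for
SVGA3D_A8R8G8B8 (4 bytes per pixel) in a 100x10 image, pixel (3, 2, 0)
sits at 2 * 400 + 3 * 4 = 812 bytes:

        static inline u32 pixel_offset_demo(void)
        {
                return svga3dsurface_get_pixel_offset(SVGA3D_A8R8G8B8,
                                                      100, 10, 3, 2, 0); /* 812 */
        }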
+
+
+static inline u32
+svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
+                              surf_size_struct baseLevelSize,
+                              u32 numMipLevels,
+                              u32 face,
+                              u32 mip)
+
+{
+       u32 offset;
+       u32 mipChainBytes;
+       u32 mipChainBytesToLevel;
+       u32 i;
+       const struct svga3d_surface_desc *desc;
+       surf_size_struct mipSize;
+       u32 bytes;
+
+       desc = svga3dsurface_get_desc(format);
+
+       mipChainBytes = 0;
+       mipChainBytesToLevel = 0;
+       for (i = 0; i < numMipLevels; i++) {
+               mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
+               bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
+               mipChainBytes += bytes;
+               if (i < mip)
+                       mipChainBytesToLevel += bytes;
+       }
+
+       offset = mipChainBytes * face + mipChainBytesToLevel;
+
+       return offset;
+}
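Images are laid out face-major: the full mip chain of face 0 first, then
face 1, and so on, with mipChainBytes as the per-face stride. Reusing the
84-byte three-level 4x4 A8R8G8B8 chain from above, face 2 / mip 1 starts
at 84 * 2 + 64 = 232 bytes:

        static inline u32 image_offset_demo(void)
        {
                surf_size_struct base = { .width = 4, .height = 4, .depth = 1 };

                return svga3dsurface_get_image_offset(SVGA3D_A8R8G8B8, base,
                                                      3, 2, 1); /* 232 */
        }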
index 9826fbc..96dc84d 100644 (file)
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
        *placement = vmw_sys_placement;
 }
 
-/**
- * FIXME: Proper access checks on buffers.
- */
-
 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
-       return 0;
+       struct ttm_object_file *tfile =
+               vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+       return vmw_user_dmabuf_verify_access(bo, tfile);
 }
 
 static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
 }
 
-static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
+static int vmw_sync_obj_flush(void *sync_obj)
 {
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
 }
 
-static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
+static bool vmw_sync_obj_signaled(void *sync_obj)
 {
-       unsigned long flags = (unsigned long) sync_arg;
        return  vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
-                                      (uint32_t) flags);
+                                      DRM_VMW_FENCE_FLAG_EXEC);
 
 }
 
-static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
-                            bool lazy, bool interruptible)
+static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
 {
-       unsigned long flags = (unsigned long) sync_arg;
-
        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
-                                 (uint32_t) flags,
+                                 DRM_VMW_FENCE_FLAG_EXEC,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644 (file)
index 0000000..00ae092
--- /dev/null
@@ -0,0 +1,274 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "ttm/ttm_placement.h"
+
+struct vmw_user_context {
+       struct ttm_base_object base;
+       struct vmw_resource res;
+};
+
+static void vmw_user_context_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base);
+
+static uint64_t vmw_user_context_size;
+
+static const struct vmw_user_resource_conv user_context_conv = {
+       .object_type = VMW_RES_CONTEXT,
+       .base_obj_to_res = vmw_user_context_base_to_res,
+       .res_free = vmw_user_context_free
+};
+
+const struct vmw_user_resource_conv *user_context_converter =
+       &user_context_conv;
+
+
+static const struct vmw_res_func vmw_legacy_context_func = {
+       .res_type = vmw_res_context,
+       .needs_backup = false,
+       .may_evict = false,
+       .type_name = "legacy contexts",
+       .backup_placement = NULL,
+       .create = NULL,
+       .destroy = NULL,
+       .bind = NULL,
+       .unbind = NULL
+};
+
+/**
+ * Context management:
+ */
+
+static void vmw_hw_context_destroy(struct vmw_resource *res)
+{
+
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDestroyContext body;
+       } *cmd;
+
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "destruction.\n");
+               return;
+       }
+
+       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
+       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+       cmd->body.cid = cpu_to_le32(res->id);
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       vmw_3d_resource_dec(dev_priv, false);
+}
+
+static int vmw_context_init(struct vmw_private *dev_priv,
+                           struct vmw_resource *res,
+                           void (*res_free) (struct vmw_resource *res))
+{
+       int ret;
+
+       struct {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDefineContext body;
+       } *cmd;
+
+       ret = vmw_resource_init(dev_priv, res, false,
+                               res_free, &vmw_legacy_context_func);
+
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a resource id.\n");
+               goto out_early;
+       }
+
+       if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
+               DRM_ERROR("Out of hw context ids.\n");
+               vmw_resource_unreference(&res);
+               return -ENOMEM;
+       }
+
+       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Fifo reserve failed.\n");
+               vmw_resource_unreference(&res);
+               return -ENOMEM;
+       }
+
+       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
+       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+       cmd->body.cid = cpu_to_le32(res->id);
+
+       vmw_fifo_commit(dev_priv, sizeof(*cmd));
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       vmw_resource_activate(res, vmw_hw_context_destroy);
+       return 0;
+
+out_early:
+       if (res_free == NULL)
+               kfree(res);
+       else
+               res_free(res);
+       return ret;
+}
+
+struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+{
+       struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+       int ret;
+
+       if (unlikely(res == NULL))
+               return NULL;
+
+       ret = vmw_context_init(dev_priv, res, NULL);
+
+       return (ret == 0) ? res : NULL;
+}
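
A minimal sketch of the kernel-internal calling convention for the
allocator above, assuming a valid dev_priv (error handling abbreviated):

	struct vmw_resource *ctx;

	ctx = vmw_context_alloc(dev_priv);
	if (unlikely(ctx == NULL))
		return -ENOMEM;

	/* ... emit FIFO commands referencing ctx->id ... */

	/* Last unreference invokes vmw_hw_context_destroy() via the destructor. */
	vmw_resource_unreference(&ctx);

Note that vmw_context_init() consumes the resource on failure, so a caller
never frees it separately.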
+
+/**
+ * User-space context management:
+ */
+
+static struct vmw_resource *
+vmw_user_context_base_to_res(struct ttm_base_object *base)
+{
+       return &(container_of(base, struct vmw_user_context, base)->res);
+}
+
+static void vmw_user_context_free(struct vmw_resource *res)
+{
+       struct vmw_user_context *ctx =
+           container_of(res, struct vmw_user_context, res);
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       ttm_base_object_kfree(ctx, base);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                           vmw_user_context_size);
+}
+
+/**
+ * This function is called when user space has no more references on the
+ * base object. It releases the base-object's reference on the resource object.
+ */
+
+static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct vmw_user_context *ctx =
+           container_of(base, struct vmw_user_context, base);
+       struct vmw_resource *res = &ctx->res;
+
+       *p_base = NULL;
+       vmw_resource_unreference(&res);
+}
+
+int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
+}
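
For orientation, a hedged user-space sketch of the define/destroy round
trip through the destroy ioctl above and the define ioctl below, assuming
libdrm's drmCommandWriteRead()/drmCommandWrite() and the
DRM_VMW_CREATE_CONTEXT and DRM_VMW_UNREF_CONTEXT command indices from
vmwgfx_drm.h:

	struct drm_vmw_context_arg arg;

	memset(&arg, 0, sizeof(arg));
	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_CONTEXT, &arg, sizeof(arg)))
		return -1;

	/* arg.cid now carries the handle embedded in later SVGA3D commands. */

	drmCommandWrite(fd, DRM_VMW_UNREF_CONTEXT, &arg, sizeof(arg));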
+
+int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_context *ctx;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       int ret;
+
+
+       /*
+        * Approximate idr memory usage with 128 bytes. It will be limited
+        * by maximum number of contexts anyway.
+        */
+
+       if (unlikely(vmw_user_context_size == 0))
+               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  vmw_user_context_size,
+                                  false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for context"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (unlikely(ctx == NULL)) {
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_user_context_size);
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+
+       res = &ctx->res;
+       ctx->base.shareable = false;
+       ctx->base.tfile = NULL;
+
+       /*
+        * From here on, the destructor takes over resource freeing.
+        */
+
+       ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       tmp = vmw_resource_reference(&ctx->res);
+       ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
+                                  &vmw_user_context_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               goto out_err;
+       }
+
+       arg->cid = ctx->base.hash.key;
+out_err:
+       vmw_resource_unreference(&res);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+
+}
index d1498bf..5fae06a 100644 (file)
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
 
        ttm_bo_unreserve(bo);
 
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
                return ret;
 
        if (pin)
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+               vmw_execbuf_release_pinned_bo(dev_priv);
 
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
        else
                placement = &vmw_vram_gmr_placement;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto err_unreserve;
 
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
        else
                placement = &vmw_vram_placement;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, placement, interruptible, false);
 
 err_unreserve:
        ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
                return ret;
 
        if (pin)
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+               vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0)
-               (void) ttm_bo_validate(bo, &vmw_sys_placement, false,
-                                      false, false);
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-       ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+       ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;
 
-       BUG_ON(!atomic_read(&bo->reserved));
+       BUG_ON(!ttm_bo_is_reserved(bo));
        BUG_ON(old_mem_type != TTM_PL_VRAM &&
               old_mem_type != VMW_PL_GMR);
 
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
        placement.num_placement = 1;
        placement.placement = &pl_flags;
 
-       ret = ttm_bo_validate(bo, &placement, false, true, true);
+       ret = ttm_bo_validate(bo, &placement, false, true);
 
        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
index 2dd185e..161f8b2 100644 (file)
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
-                            0, 0, false, NULL,
+                            0, false, NULL,
                             &dev_priv->dummy_query_bo);
 }
 
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
+       enum vmw_res_type i;
 
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
-       idr_init(&dev_priv->context_idr);
-       idr_init(&dev_priv->surface_idr);
-       idr_init(&dev_priv->stream_idr);
+
+       for (i = vmw_res_context; i < vmw_res_max; ++i) {
+               idr_init(&dev_priv->res_idr[i]);
+               INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+       }
+
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
-       INIT_LIST_HEAD(&dev_priv->surface_lru);
+
        dev_priv->used_memory_size = 0;
 
        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                }
        }
 
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+               ret = drm_irq_install(dev);
+               if (ret != 0) {
+                       DRM_ERROR("Failed installing irq: %d\n", ret);
+                       goto out_no_irq;
+               }
+       }
+
        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;
 
-       /* Need to start the fifo to check if we can do screen objects */
-       ret = vmw_3d_resource_inc(dev_priv, true);
-       if (unlikely(ret != 0))
-               goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);
 
        /* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                goto out_no_kms;
        vmw_overlay_init(dev_priv);
 
-       /* 3D Depends on Screen Objects being used. */
-       DRM_INFO("Detected %sdevice 3D availability.\n",
-                vmw_fifo_have_3d(dev_priv) ?
-                "" : "no ");
-
-       /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
+               ret = vmw_3d_resource_inc(dev_priv, true);
+               if (unlikely(ret != 0))
+                       goto out_no_fifo;
                vmw_fb_init(dev_priv);
-       } else {
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, true);
-       }
-
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-               ret = drm_irq_install(dev);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("Failed installing irq: %d\n", ret);
-                       goto out_no_irq;
-               }
        }
 
        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        return 0;
 
-out_no_irq:
-       if (dev_priv->enable_fb)
-               vmw_fb_close(dev_priv);
+out_no_fifo:
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
 out_no_kms:
-       /* We still have a 3D resource reference held */
-       if (dev_priv->enable_fb) {
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, false);
-       }
-out_no_fifo:
+       vmw_kms_restore_vga(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
+out_no_irq:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
@@ -684,9 +674,9 @@ out_err2:
 out_err1:
        vmw_ttm_global_release(dev_priv);
 out_err0:
-       idr_destroy(&dev_priv->surface_idr);
-       idr_destroy(&dev_priv->context_idr);
-       idr_destroy(&dev_priv->stream_idr);
+       for (i = vmw_res_context; i < vmw_res_max; ++i)
+               idr_destroy(&dev_priv->res_idr[i]);
+
        kfree(dev_priv);
        return ret;
 }
@@ -694,13 +684,14 @@ out_err0:
 static int vmw_driver_unload(struct drm_device *dev)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
+       enum vmw_res_type i;
 
        unregister_pm_notifier(&dev_priv->pm_nb);
 
+       if (dev_priv->ctx.res_ht_initialized)
+               drm_ht_remove(&dev_priv->ctx.res_ht);
        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
-       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-               drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
+       if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+               drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
-       idr_destroy(&dev_priv->surface_idr);
-       idr_destroy(&dev_priv->context_idr);
-       idr_destroy(&dev_priv->stream_idr);
+
+       for (i = vmw_res_context; i < vmw_res_max; ++i)
+               idr_destroy(&dev_priv->res_idr[i]);
 
        kfree(dev_priv);
 
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
 
 out_no_active_lock:
        if (!dev_priv->enable_fb) {
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
 }
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-       vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
+               vmw_kms_restore_vga(dev_priv);
+               vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
-               vmw_kms_restore_vga(dev_priv);
-               vmw_3d_resource_dec(dev_priv, true);
        }
 
        dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
-               vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+               vmw_execbuf_release_pinned_bo(dev_priv);
+               vmw_resource_evict_all(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);
 
                break;
index 88a179e..13aeda7 100644 (file)
@@ -67,31 +67,46 @@ struct vmw_fpriv {
 
 struct vmw_dma_buffer {
        struct ttm_buffer_object base;
-       struct list_head validate_list;
-       bool gmr_bound;
-       uint32_t cur_validate_node;
-       bool on_validate_list;
+       struct list_head res_list;
 };
 
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure also contains driver-private validation info on top of
+ * the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+       struct ttm_validate_buffer base;
+       struct drm_hash_item hash;
+};
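
A sketch of the constant-time lookup the @hash member enables, assuming a
buffer object previously added to sw_context->res_ht (the same pattern
vmw_bo_to_validate_list() uses in vmwgfx_execbuf.c):

	struct drm_hash_item *hash;
	struct vmw_validate_buffer *vval_buf = NULL;

	if (drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, &hash) == 0)
		vval_buf = container_of(hash, struct vmw_validate_buffer, hash);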
+
+struct vmw_res_func;
 struct vmw_resource {
        struct kref kref;
        struct vmw_private *dev_priv;
-       struct idr *idr;
        int id;
-       enum ttm_object_type res_type;
        bool avail;
-       void (*remove_from_lists) (struct vmw_resource *res);
-       void (*hw_destroy) (struct vmw_resource *res);
+       unsigned long backup_size;
+       bool res_dirty; /* Protected by backup buffer reserved */
+       bool backup_dirty; /* Protected by backup buffer reserved */
+       struct vmw_dma_buffer *backup;
+       unsigned long backup_offset;
+       const struct vmw_res_func *func;
+       struct list_head lru_head; /* Protected by the resource lock */
+       struct list_head mob_head; /* Protected by @backup reserved */
        void (*res_free) (struct vmw_resource *res);
-       struct list_head validate_head;
-       struct list_head query_head; /* Protected by the cmdbuf mutex */
-       /* TODO is a generic snooper needed? */
-#if 0
-       void (*snoop)(struct vmw_resource *res,
-                     struct ttm_object_file *tfile,
-                     SVGA3dCmdHeader *header);
-       void *snoop_priv;
-#endif
+       void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+       vmw_res_context,
+       vmw_res_surface,
+       vmw_res_stream,
+       vmw_res_max
 };
 
 struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
        struct vmw_resource res;
-       struct list_head lru_head; /* Protected by the resource lock */
        uint32_t flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+       struct drm_vmw_size base_size;
        struct drm_vmw_size *sizes;
        uint32_t num_sizes;
-
        bool scanout;
-
       /* TODO so far just an extra pointer */
        struct vmw_cursor_snooper snooper;
-       struct ttm_buffer_object *backup;
        struct vmw_surface_offset *offsets;
-       uint32_t backup_size;
+       SVGA3dTextureFilter autogen_filter;
+       uint32_t multisample_count;
 };
 
 struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
        uint32_t index;
 };
 
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+       bool valid;
+       uint32_t handle;
+       struct vmw_resource *res;
+       struct vmw_resource_val_node *node;
+};
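
A sketch of the fast path this cache serves, assuming handle and res_type
come from the command currently being parsed (the real check lives in
vmw_cmd_res_check() in vmwgfx_execbuf.c):

	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];

	if (rcache->valid && handle == rcache->handle)
		return rcache->res;	/* Skips the base-object lookup. */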
+
 struct vmw_sw_context{
-       struct ida bo_list;
-       uint32_t last_cid;
-       bool cid_valid;
+       struct drm_open_hash res_ht;
+       bool res_ht_initialized;
       bool kernel; /**< whether the call was made from the kernel */
-       struct vmw_resource *cur_ctx;
-       uint32_t last_sid;
-       uint32_t sid_translation;
-       bool sid_valid;
        struct ttm_object_file *tfile;
        struct list_head validate_nodes;
        struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
        uint32_t cur_reloc;
-       struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+       struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
        uint32_t cur_val_buf;
        uint32_t *cmd_bounce;
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        uint32_t fence_flags;
-       struct list_head query_list;
        struct ttm_buffer_object *cur_query_bo;
-       uint32_t cur_query_cid;
-       bool query_cid_valid;
+       struct list_head res_relocations;
+       uint32_t *buf_start;
+       struct vmw_res_cache_entry res_cache[vmw_res_max];
+       struct vmw_resource *last_query_ctx;
+       bool needs_post_query_barrier;
+       struct vmw_resource *error_resource;
 };
 
 struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
         */
 
        rwlock_t resource_lock;
-       struct idr context_idr;
-       struct idr surface_idr;
-       struct idr stream_idr;
-
+       struct idr res_idr[vmw_res_max];
        /*
         * Block lastclose from racing with firstopen.
         */
@@ -320,6 +347,7 @@ struct vmw_private {
        struct ttm_buffer_object *dummy_query_bo;
        struct ttm_buffer_object *pinned_bo;
        uint32_t query_cid;
+       uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;
 
        /*
@@ -329,10 +357,15 @@ struct vmw_private {
         * protected by the cmdbuf mutex for simplicity.
         */
 
-       struct list_head surface_lru;
+       struct list_head res_lru[vmw_res_max];
        uint32_t used_memory_size;
 };
 
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+       return container_of(res, struct vmw_surface, res);
+}
+
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
        return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 /**
  * Resource utilities - vmwgfx_resource.c
  */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
 
 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
                                  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+       struct vmw_private *dev_priv,
+       struct ttm_object_file *tfile,
+       uint32_t handle,
+       const struct vmw_user_resource_conv *converter,
+       struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
-                           struct vmw_surface *srf,
-                           void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-                                         struct ttm_object_file *tfile,
-                                         uint32_t handle,
-                                         struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           size_t size, struct ttm_placement *placement,
                           bool interuptable,
                           void (*bo_free) (struct ttm_buffer_object *bo));
+extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+                                 struct ttm_object_file *tfile);
 extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t *inout_id,
                                  struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+                                  struct vmw_dma_buffer *new_backup,
+                                  unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+                                    struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+                               struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
                               struct drm_vmw_fence_rep __user
                               *user_fence_rep,
                               struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-                             bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+                                           struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
                                      struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
 static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
 {
        struct vmw_dma_buffer *tmp_buf = *buf;
-       struct ttm_buffer_object *bo = &tmp_buf->base;
+
        *buf = NULL;
+       if (tmp_buf != NULL) {
+               struct ttm_buffer_object *bo = &tmp_buf->base;
 
-       ttm_bo_unref(&bo);
+               ttm_bo_unref(&bo);
+       }
 }
 
 static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
index 30654b4..394e647 100644 (file)
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_placement.h>
 
+#define VMW_RES_HT_ORDER 12
+
+/**
+ * struct vmw_resource_relocation - Relocation info for resources
+ *
+ * @head: List head for the software context's relocation list.
+ * @res: Non-ref-counted pointer to the resource.
+ * @offset: Offset into the command buffer, in 4-byte units, where the
+ * id that needs fixup is located.
+ */
+struct vmw_resource_relocation {
+       struct list_head head;
+       const struct vmw_resource *res;
+       unsigned long offset;
+};
+
+/**
+ * struct vmw_resource_val_node - Validation info for resources
+ *
+ * @head: List head for the software context's resource list.
+ * @hash: Hash entry for quick resource to val_node lookup.
+ * @res: Ref-counted pointer to the resource.
+ * @new_backup: Ref-counted pointer to the new backup buffer.
+ * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
+ * @first_usage: Set to true the first time the resource is referenced in
+ * the command stream.
+ * @no_buffer_needed: The resource does not need to allocate a backup buffer
+ * on reservation. The command stream will provide one.
+ */
+struct vmw_resource_val_node {
+       struct list_head head;
+       struct drm_hash_item hash;
+       struct vmw_resource *res;
+       struct vmw_dma_buffer *new_backup;
+       unsigned long new_backup_offset;
+       bool first_usage;
+       bool no_buffer_needed;
+};
+
+/**
+ * vmw_resource_list_unreserve - unreserve resources previously reserved for
+ * command submission.
+ *
+ * @list: List of resources to unreserve.
+ * @backoff: Whether command submission failed.
+ */
+static void vmw_resource_list_unreserve(struct list_head *list,
+                                       bool backoff)
+{
+       struct vmw_resource_val_node *val;
+
+       list_for_each_entry(val, list, head) {
+               struct vmw_resource *res = val->res;
+               struct vmw_dma_buffer *new_backup =
+                       backoff ? NULL : val->new_backup;
+
+               vmw_resource_unreserve(res, new_backup,
+                       val->new_backup_offset);
+               vmw_dmabuf_unreference(&val->new_backup);
+       }
+}
+
+
+/**
+ * vmw_resource_val_add - Add a resource to the software context's
+ * resource list if it's not already on it.
+ *
+ * @sw_context: Pointer to the software context.
+ * @res: Pointer to the resource.
+ * @p_node: On successful return points to a valid pointer to a
+ * struct vmw_resource_val_node, if non-NULL on entry.
+ */
+static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
+                               struct vmw_resource *res,
+                               struct vmw_resource_val_node **p_node)
+{
+       struct vmw_resource_val_node *node;
+       struct drm_hash_item *hash;
+       int ret;
+
+       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
+                                   &hash) == 0)) {
+               node = container_of(hash, struct vmw_resource_val_node, hash);
+               node->first_usage = false;
+               if (unlikely(p_node != NULL))
+                       *p_node = node;
+               return 0;
+       }
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (unlikely(node == NULL)) {
+               DRM_ERROR("Failed to allocate a resource validation "
+                         "entry.\n");
+               return -ENOMEM;
+       }
+
+       node->hash.key = (unsigned long) res;
+       ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to initialize a resource validation "
+                         "entry.\n");
+               kfree(node);
+               return ret;
+       }
+       list_add_tail(&node->head, &sw_context->resource_list);
+       node->res = vmw_resource_reference(res);
+       node->first_usage = true;
+
+       if (unlikely(p_node != NULL))
+               *p_node = node;
+
+       return 0;
+}
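
The hash entry makes repeated additions idempotent; a sketch, assuming a
valid res already looked up by the caller:

	struct vmw_resource_val_node *node;

	/* First reference in the stream: a node is created, first_usage == true. */
	ret = vmw_resource_val_add(sw_context, res, &node);

	/* Later references hit the hash entry: same node, first_usage == false. */
	ret = vmw_resource_val_add(sw_context, res, &node);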
+
+/**
+ * vmw_resource_relocation_add - Add a relocation to the relocation list
+ *
+ * @list: Pointer to head of relocation list.
+ * @res: The resource.
+ * @offset: Offset into the command buffer currently being parsed where the
+ * id that needs fixup is located. Granularity is 4 bytes.
+ */
+static int vmw_resource_relocation_add(struct list_head *list,
+                                      const struct vmw_resource *res,
+                                      unsigned long offset)
+{
+       struct vmw_resource_relocation *rel;
+
+       rel = kmalloc(sizeof(*rel), GFP_KERNEL);
+       if (unlikely(rel == NULL)) {
+               DRM_ERROR("Failed to allocate a resource relocation.\n");
+               return -ENOMEM;
+       }
+
+       rel->res = res;
+       rel->offset = offset;
+       list_add_tail(&rel->head, list);
+
+       return 0;
+}
+
+/**
+ * vmw_resource_relocations_free - Free all relocations on a list
+ *
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_free(struct list_head *list)
+{
+       struct vmw_resource_relocation *rel, *n;
+
+       list_for_each_entry_safe(rel, n, list, head) {
+               list_del(&rel->head);
+               kfree(rel);
+       }
+}
+
+/**
+ * vmw_resource_relocations_apply - Apply all relocations on a list
+ *
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation
+ * list was built, but the contents must be the same modulo the
+ * resource ids.
+ * @list: Pointer to the head of the relocation list.
+ */
+static void vmw_resource_relocations_apply(uint32_t *cb,
+                                          struct list_head *list)
+{
+       struct vmw_resource_relocation *rel;
+
+       list_for_each_entry(rel, list, head)
+               cb[rel->offset] = rel->res->id;
+}
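
Taken together, the relocation helpers form a two-phase protocol; a
condensed sketch, with res, id and the bounce buffer assumed to come from
the surrounding parse and submit code:

	/* Parse phase: record where the device id must be patched (4-byte units). */
	ret = vmw_resource_relocation_add(&sw_context->res_relocations, res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	/* Submit phase: resource ids are final; patch the bounce buffer. */
	vmw_resource_relocations_apply(sw_context->cmd_bounce,
				       &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);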
+
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
        return 0;
 }
 
-static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
-                                         struct vmw_resource **p_res)
-{
-       struct vmw_resource *res = *p_res;
-
-       if (list_empty(&res->validate_head)) {
-               list_add_tail(&res->validate_head, &sw_context->resource_list);
-               *p_res = NULL;
-       } else
-               vmw_resource_unreference(p_res);
-}
-
 /**
  * vmw_bo_to_validate_list - add a bo to a validate list
  *
  * @sw_context: The software context used for this command submission batch.
  * @bo: The buffer object to add.
- * @fence_flags: Fence flags to be or'ed with any other fence flags for
- * this buffer on this submission batch.
  * @p_val_node: If non-NULL, will be updated with the validate node number
  * on return.
  *
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
-                                  uint32_t fence_flags,
                                   uint32_t *p_val_node)
 {
        uint32_t val_node;
+       struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
+       struct drm_hash_item *hash;
+       int ret;
 
-       val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-
-       if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
-               DRM_ERROR("Max number of DMA buffers per submission"
-                         " exceeded.\n");
-               return -EINVAL;
-       }
-
-       val_buf = &sw_context->val_bufs[val_node];
-       if (unlikely(val_node == sw_context->cur_val_buf)) {
-               val_buf->new_sync_obj_arg = NULL;
+       if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
+                                   &hash) == 0)) {
+               vval_buf = container_of(hash, struct vmw_validate_buffer,
+                                       hash);
+               val_buf = &vval_buf->base;
+               val_node = vval_buf - sw_context->val_bufs;
+       } else {
+               val_node = sw_context->cur_val_buf;
+               if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+                       DRM_ERROR("Max number of DMA buffers per submission "
+                                 "exceeded.\n");
+                       return -EINVAL;
+               }
+               vval_buf = &sw_context->val_bufs[val_node];
+               vval_buf->hash.key = (unsigned long) bo;
+               ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed to initialize a buffer validation "
+                                 "entry.\n");
+                       return ret;
+               }
+               ++sw_context->cur_val_buf;
+               val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
+               val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-               ++sw_context->cur_val_buf;
        }
 
-       val_buf->new_sync_obj_arg = (void *)
-               ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
-       sw_context->fence_flags |= fence_flags;
+       sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 
        if (p_val_node)
                *p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
        return 0;
 }
 
-static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
-                            struct vmw_sw_context *sw_context,
-                            SVGA3dCmdHeader *header)
+/**
+ * vmw_resources_reserve - Reserve all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Note that since vmwgfx command submission is currently protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources:
+ * only a single thread at a time will attempt this.
+ */
+static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
-       struct vmw_resource *ctx;
-
-       struct vmw_cid_cmd {
-               SVGA3dCmdHeader header;
-               __le32 cid;
-       } *cmd;
+       struct vmw_resource_val_node *val;
        int ret;
 
-       cmd = container_of(header, struct vmw_cid_cmd, header);
-       if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
-               return 0;
+       list_for_each_entry(val, &sw_context->resource_list, head) {
+               struct vmw_resource *res = val->res;
 
-       ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
-                               &ctx);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not find or use context %u\n",
-                         (unsigned) cmd->cid);
-               return ret;
+               ret = vmw_resource_reserve(res, val->no_buffer_needed);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               if (res->backup) {
+                       struct ttm_buffer_object *bo = &res->backup->base;
+
+                       ret = vmw_bo_to_validate_list
+                               (sw_context, bo, NULL);
+
+                       if (unlikely(ret != 0))
+                               return ret;
+               }
        }
+       return 0;
+}
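
The reserve/validate helpers slot into command submission in a fixed
order; a simplified sketch of that ordering, assuming the usual ttm_eu
reservation of sw_context->validate_nodes in between (error unwinding
omitted):

	ret = vmw_resources_reserve(sw_context);	/* Also queues backup BOs. */
	if (unlikely(ret != 0))
		return ret;

	/* ... reserve and validate sw_context->validate_nodes via ttm_eu ... */

	ret = vmw_resources_validate(sw_context);	/* Needs validated backups. */
	if (unlikely(ret != 0))
		return ret;

	/* ... apply relocations, submit FIFO commands, fence ... */

	vmw_resource_list_unreserve(&sw_context->resource_list, false);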
 
-       sw_context->last_cid = cmd->cid;
-       sw_context->cid_valid = true;
-       sw_context->cur_ctx = ctx;
-       vmw_resource_to_validate_list(sw_context, &ctx);
+/**
+ * vmw_resources_validate - Validate all resources on the sw_context's
+ * resource list.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Before this function is called, all resource backup buffers must have
+ * been validated.
+ */
+static int vmw_resources_validate(struct vmw_sw_context *sw_context)
+{
+       struct vmw_resource_val_node *val;
+       int ret;
+
+       list_for_each_entry(val, &sw_context->resource_list, head) {
+               struct vmw_resource *res = val->res;
 
+               ret = vmw_resource_validate(res);
+               if (unlikely(ret != 0)) {
+                       if (ret != -ERESTARTSYS)
+                               DRM_ERROR("Failed to validate resource.\n");
+                       return ret;
+               }
+       }
        return 0;
 }
 
-static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
+/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id: Pointer to the location in the command buffer currently being
+ * parsed where the user-space resource id handle is located.
+ * @p_val: If non-NULL, points to the resource's validation node on return.
+ */
+static int vmw_cmd_res_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
-                            uint32_t *sid)
+                            enum vmw_res_type res_type,
+                            const struct vmw_user_resource_conv *converter,
+                            uint32_t *id,
+                            struct vmw_resource_val_node **p_val)
 {
-       struct vmw_surface *srf;
-       int ret;
+       struct vmw_res_cache_entry *rcache =
+               &sw_context->res_cache[res_type];
        struct vmw_resource *res;
+       struct vmw_resource_val_node *node;
+       int ret;
 
-       if (*sid == SVGA3D_INVALID_ID)
+       if (*id == SVGA3D_INVALID_ID)
                return 0;
 
-       if (likely((sw_context->sid_valid  &&
-                     *sid == sw_context->last_sid))) {
-               *sid = sw_context->sid_translation;
-               return 0;
-       }
+       /*
+        * Fastpath in case of repeated commands referencing the same
+        * resource.
+        */
 
-       ret = vmw_user_surface_lookup_handle(dev_priv,
-                                            sw_context->tfile,
-                                            *sid, &srf);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could ot find or use surface 0x%08x "
-                         "address 0x%08lx\n",
-                         (unsigned int) *sid,
-                         (unsigned long) sid);
-               return ret;
+       if (likely(rcache->valid && *id == rcache->handle)) {
+               const struct vmw_resource *res = rcache->res;
+
+               rcache->node->first_usage = false;
+               if (p_val)
+                       *p_val = rcache->node;
+
+               return vmw_resource_relocation_add
+                       (&sw_context->res_relocations, res,
+                        id - sw_context->buf_start);
        }
 
-       ret = vmw_surface_validate(dev_priv, srf);
+       ret = vmw_user_resource_lookup_handle(dev_priv,
+                                             sw_context->tfile,
+                                             *id,
+                                             converter,
+                                             &res);
        if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Could not validate surface.\n");
-               vmw_surface_unreference(&srf);
+               DRM_ERROR("Could not find or use resource 0x%08x.\n",
+                         (unsigned) *id);
+               dump_stack();
                return ret;
        }
 
-       sw_context->last_sid = *sid;
-       sw_context->sid_valid = true;
-       sw_context->sid_translation = srf->res.id;
-       *sid = sw_context->sid_translation;
+       rcache->valid = true;
+       rcache->res = res;
+       rcache->handle = *id;
 
-       res = &srf->res;
-       vmw_resource_to_validate_list(sw_context, &res);
+       ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+                                         res,
+                                         id - sw_context->buf_start);
+       if (unlikely(ret != 0))
+               goto out_no_reloc;
+
+       ret = vmw_resource_val_add(sw_context, res, &node);
+       if (unlikely(ret != 0))
+               goto out_no_reloc;
 
+       rcache->node = node;
+       if (p_val)
+               *p_val = node;
+       vmw_resource_unreference(&res);
        return 0;
+
+out_no_reloc:
+       BUG_ON(sw_context->error_resource != NULL);
+       sw_context->error_resource = res;
+
+       return ret;
 }
 
+/**
+ * vmw_cmd_cid_check - Check a command header for valid context information.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @header: A command header with an embedded user-space context handle.
+ *
+ * Convenience function: Call vmw_cmd_res_check with the user-space context
+ * handle embedded in @header.
+ */
+static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
+                            struct vmw_sw_context *sw_context,
+                            SVGA3dCmdHeader *header)
+{
+       struct vmw_cid_cmd {
+               SVGA3dCmdHeader header;
+               __le32 cid;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_cid_cmd, header);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->cid, NULL);
+}
 
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                return ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.target.sid, NULL);
        return ret;
 }
 
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter,
+                               &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                return -EPERM;
        }
 
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter,
+                                &cmd->body.srcImage.sid, NULL);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                return -EPERM;
        }
 
-       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                user_surface_converter, &cmd->body.sid,
+                                NULL);
 }
 
 /**
  * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
  *
  * @dev_priv: The device private structure.
- * @cid: The hardware context for the next query.
  * @new_query_bo: The new buffer holding query results.
  * @sw_context: The software context used for this command submission.
  *
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * query results, and if another buffer currently is pinned for query
  * results. If so, the function prepares the state of @sw_context for
  * switching pinned buffers after successful submission of the current
- * command batch. It also checks whether we're using a new query context.
- * In that case, it makes sure we emit a query barrier for the old
- * context before the current query buffer is fenced.
+ * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-                                      uint32_t cid,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
 {
+       struct vmw_res_cache_entry *ctx_entry =
+               &sw_context->res_cache[vmw_res_context];
        int ret;
-       bool add_cid = false;
-       uint32_t cid_to_add;
+
+       BUG_ON(!ctx_entry->valid);
+       sw_context->last_query_ctx = ctx_entry->res;
 
        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                }
 
                if (unlikely(sw_context->cur_query_bo != NULL)) {
-                       BUG_ON(!sw_context->query_cid_valid);
-                       add_cid = true;
-                       cid_to_add = sw_context->cur_query_cid;
+                       sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
-                                                     DRM_VMW_FENCE_FLAG_EXEC,
                                                      NULL);
                        if (unlikely(ret != 0))
                                return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
-                                             DRM_VMW_FENCE_FLAG_EXEC,
                                              NULL);
                if (unlikely(ret != 0))
                        return ret;
 
        }
 
-       if (unlikely(cid != sw_context->cur_query_cid &&
-                    sw_context->query_cid_valid)) {
-               add_cid = true;
-               cid_to_add = sw_context->cur_query_cid;
-       }
-
-       sw_context->cur_query_cid = cid;
-       sw_context->query_cid_valid = true;
-
-       if (add_cid) {
-               struct vmw_resource *ctx = sw_context->cur_ctx;
-
-               if (list_empty(&ctx->query_head))
-                       list_add_tail(&ctx->query_head,
-                                     &sw_context->query_list);
-               ret = vmw_bo_to_validate_list(sw_context,
-                                             dev_priv->dummy_query_bo,
-                                             DRM_VMW_FENCE_FLAG_EXEC,
-                                             NULL);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
        return 0;
 }
 
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
  * @sw_context: The software context used for this command submission batch.
  *
  * This function will check if we're switching query buffers, and will then
- * if no other query waits are issued this command submission batch,
  * issue a dummy occlusion query wait used as a query barrier. When the fence
  * object following that query wait has signaled, we are sure that all
- * preseding queries have finished, and the old query buffer can be unpinned.
+ * preceding queries have finished, and the old query buffer can be unpinned.
  * However, since both the new query buffer and the old one are fenced with
  * that fence, we can do an asynchronous unpin now, and be sure that the
  * old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context)
 {
-
-       struct vmw_resource *ctx, *next_ctx;
-       int ret;
-
        /*
         * The validate list should still hold references to all
         * contexts here.
         */
 
-       list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
-                                query_head) {
-               list_del_init(&ctx->query_head);
+       if (sw_context->needs_post_query_barrier) {
+               struct vmw_res_cache_entry *ctx_entry =
+                       &sw_context->res_cache[vmw_res_context];
+               struct vmw_resource *ctx;
+               int ret;
 
-               BUG_ON(list_empty(&ctx->validate_head));
+               BUG_ON(!ctx_entry->valid);
+               ctx = ctx_entry->res;
 
                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }
 
-               vmw_bo_pin(sw_context->cur_query_bo, true);
+               if (!sw_context->needs_post_query_barrier) {
+                       vmw_bo_pin(sw_context->cur_query_bo, true);
 
-               /*
-                * We pin also the dummy_query_bo buffer so that we
-                * don't need to validate it when emitting
-                * dummy queries in context destroy paths.
-                */
+                       /*
+                        * We pin also the dummy_query_bo buffer so that we
+                        * don't need to validate it when emitting
+                        * dummy queries in context destroy paths.
+                        */
 
-               vmw_bo_pin(dev_priv->dummy_query_bo, true);
-               dev_priv->dummy_query_bo_pinned = true;
+                       vmw_bo_pin(dev_priv->dummy_query_bo, true);
+                       dev_priv->dummy_query_bo_pinned = true;
 
-               dev_priv->query_cid = sw_context->cur_query_cid;
-               dev_priv->pinned_bo =
-                       ttm_bo_reference(sw_context->cur_query_bo);
+                       BUG_ON(sw_context->last_query_ctx == NULL);
+                       dev_priv->query_cid = sw_context->last_query_ctx->id;
+                       dev_priv->query_cid_valid = true;
+                       dev_priv->pinned_bo =
+                               ttm_bo_reference(sw_context->cur_query_bo);
+               }
        }
 }
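
The prepare/commit pair is split across parsing and submission; a brief
sketch of the assumed call sites (prepare is wired up in
vmw_cmd_end_query() below; commit runs after the batch reaches the FIFO):

	/* While parsing an SVGA_3D_CMD_END_QUERY command: */
	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	/* After the command batch has been committed: */
	vmw_query_bo_switch_commit(dev_priv, sw_context);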
 
 /**
- * vmw_query_switch_backoff - clear query barrier list
- * @sw_context: The sw context used for this submission batch.
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr
  *
- * This function is used as part of an error path, where a previously
- * set up list of query barriers needs to be cleared.
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: The software context used for this command batch validation.
+ * @ptr: Pointer to the user-space handle to be translated.
+ * @vmw_bo_p: Points to a location that, on successful return, will carry
+ * a reference-counted pointer to the DMA buffer identified by the
+ * user-space handle in @ptr.
  *
+ * This function saves information needed to translate a user-space buffer
+ * handle to a valid SVGAGuestPtr. The translation does not take place
+ * immediately, but during a call to vmw_apply_relocations().
+ * This function builds a relocation list and a list of buffers to validate.
+ * The former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
  */
-static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
-{
-       struct list_head *list, *next;
-
-       list_for_each_safe(list, next, &sw_context->query_list) {
-               list_del_init(list);
-       }
-}
-
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;
 
-       ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
-                                     &reloc->index);
+       ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;
 
@@ -479,6 +731,37 @@ out_no_reloc:
        return ret;
 }
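
The kernel-doc above describes a record-now, patch-later scheme: at check time
the driver only remembers where in the command stream a guest pointer lives,
and vmw_apply_relocations() rewrites that location once buffer placement is
known. A standalone toy model of the two-phase pattern, with simplified types
and hypothetical names (a sketch, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    struct guest_ptr { uint32_t gmr_id; uint32_t offset; };
    struct buffer    { uint32_t placement_offset; };  /* known only at submit */
    struct reloc     { struct guest_ptr *location; struct buffer *bo; };

    /* Phase 1: record a pointer into the command stream; translate nothing. */
    static void record_reloc(struct reloc *r, struct guest_ptr *loc,
                             struct buffer *bo)
    {
            r->location = loc;
            r->bo = bo;
    }

    /* Phase 2: placement is known; patch the command stream in place. */
    static void apply_reloc(struct reloc *r)
    {
            r->location->offset += r->bo->placement_offset;
    }

    int main(void)
    {
            struct guest_ptr cmd = { .gmr_id = 0, .offset = 16 };
            struct buffer bo = { .placement_offset = 4096 };
            struct reloc r;

            record_reloc(&r, &cmd, &bo);  /* during command validation   */
            apply_reloc(&r);              /* just before FIFO submission */
            printf("patched offset: %u\n", (unsigned)cmd.offset); /* 4112 */
            return 0;
    }
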
 
+/**
+ * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
+                              struct vmw_sw_context *sw_context,
+                              SVGA3dCmdHeader *header)
+{
+       struct vmw_begin_query_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdBeginQuery q;
+       } *cmd;
+
+       cmd = container_of(header, struct vmw_begin_query_cmd,
+                          header);
+
+       return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+                                user_context_converter, &cmd->q.cid,
+                                NULL);
+}
+
+/**
+ * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
-                                         &vmw_bo->base, sw_context);
+       ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
 }
 
+/**
+ * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context used for this command submission.
+ * @header: Pointer to the command header in the command stream.
+ */
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;
-       struct vmw_resource *ctx;
 
        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                return ret;
 
        vmw_dmabuf_unreference(&vmw_bo);
-
-       /*
-        * This wait will act as a barrier for previous waits for this
-        * context.
-        */
-
-       ctx = sw_context->cur_ctx;
-       if (!list_empty(&ctx->query_head))
-               list_del_init(&ctx->query_head);
-
        return 0;
 }
 
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       SVGA3dCmdHeader *header)
 {
        struct vmw_dma_buffer *vmw_bo = NULL;
-       struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
-       struct vmw_resource *res;
 
        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       bo = &vmw_bo->base;
-       ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
-                                            cmd->dma.host.sid, &srf);
-       if (ret) {
-               DRM_ERROR("could not find surface\n");
-               goto out_no_reloc;
-       }
-
-       ret = vmw_surface_validate(dev_priv, srf);
+       ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                               user_surface_converter, &cmd->dma.host.sid,
+                               NULL);
        if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Culd not validate surface.\n");
-               goto out_no_validate;
+               if (unlikely(ret != -ERESTARTSYS))
+                       DRM_ERROR("could not find surface for DMA.\n");
+               goto out_no_surface;
        }
 
-       /*
-        * Patch command stream with device SID.
-        */
-       cmd->dma.host.sid = srf->res.id;
-       vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-
-       vmw_dmabuf_unreference(&vmw_bo);
-
-       res = &srf->res;
-       vmw_resource_to_validate_list(sw_context, &res);
+       srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-       return 0;
+       vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
 
-out_no_validate:
-       vmw_surface_unreference(&srf);
-out_no_reloc:
+out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
 }
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
        }
 
        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &decl->array.surfaceId);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
 
        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &range->indexArray.surfaceId);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;
 
-               ret = vmw_cmd_sid_check(dev_priv, sw_context,
-                                       &cur_state->value);
+               ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+                                       user_surface_converter,
+                                       &cur_state->value, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        return ret;
 }
 
+/**
+ * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
+                             struct vmw_sw_context *sw_context,
+                             SVGA3dCmdHeader *header)
+{
+       struct vmw_set_shader_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetShader body;
+       } *cmd;
+       int ret;
+
+       cmd = container_of(header, struct vmw_set_shader_cmd,
+                          header);
+
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       return 0;
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
-                   &vmw_cmd_blt_surf_screen_check)
+                   &vmw_cmd_blt_surf_screen_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
+       VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
 };
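
The table above maps each SVGA3D command id to a validation callback, and the
new entries route previously unchecked ids to &vmw_cmd_invalid so they fail
validation. A standalone sketch of the bounds-checked function-table idiom,
with hypothetical names and sizes (a sketch, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*cmd_func)(void *ctx, const void *body);

    static int cmd_ok(void *ctx, const void *body)      { return 0; }
    static int cmd_invalid(void *ctx, const void *body) { return -22; /* EINVAL */ }

    #define CMD_MAX 4
    static const cmd_func cmd_funcs[CMD_MAX] = {
            [0] = cmd_ok,
            [1] = cmd_invalid,
            /* slots left unset stay NULL and are rejected below */
    };

    static int dispatch(uint32_t id, void *ctx, const void *body)
    {
            if (id >= CMD_MAX || cmd_funcs[id] == NULL)
                    return -22;  /* unknown command: refuse the whole batch */
            return cmd_funcs[id](ctx, body);
    }

    int main(void)
    {
            printf("%d %d %d\n", dispatch(0, NULL, NULL),
                   dispatch(1, NULL, NULL), dispatch(7, NULL, NULL));
            return 0;
    }
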
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
        int32_t cur_size = size;
        int ret;
 
+       sw_context->buf_start = buf;
+
        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 
        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
-               validate = &sw_context->val_bufs[reloc->index];
+               validate = &sw_context->val_bufs[reloc->index].base;
                bo = validate->bo;
-               if (bo->mem.mem_type == TTM_PL_VRAM) {
+               switch (bo->mem.mem_type) {
+               case TTM_PL_VRAM:
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
-               } else
+                       break;
+               case VMW_PL_GMR:
                        reloc->location->gmrId = bo->mem.start;
+                       break;
+               default:
+                       BUG();
+               }
        }
        vmw_free_relocations(sw_context);
 }
 
+/**
+ * vmw_resource_list_unreference - Free up a resource list and unreference
+ * all resources referenced by it.
+ *
+ * @list: The resource list.
+ */
+static void vmw_resource_list_unreference(struct list_head *list)
+{
+       struct vmw_resource_val_node *val, *val_next;
+
+       /*
+        * Drop references to resources held during command submission.
+        */
+
+       list_for_each_entry_safe(val, val_next, list, head) {
+               list_del_init(&val->head);
+               vmw_resource_unreference(&val->res);
+               kfree(val);
+       }
+}
+
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
-       struct ttm_validate_buffer *entry, *next;
-       struct vmw_resource *res, *res_next;
+       struct vmw_validate_buffer *entry, *next;
+       struct vmw_resource_val_node *val;
 
        /*
         * Drop references to DMA buffers held during command submission.
         */
        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
-                                head) {
-               list_del(&entry->head);
-               vmw_dmabuf_validate_clear(entry->bo);
-               ttm_bo_unref(&entry->bo);
+                                base.head) {
+               list_del(&entry->base.head);
+               ttm_bo_unref(&entry->base.bo);
+               (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
 
-       /*
-        * Drop references to resources held during command submission.
-        */
-       vmw_resource_unreserve(&sw_context->resource_list);
-       list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
-                                validate_head) {
-               list_del_init(&res->validate_head);
-               vmw_resource_unreference(&res);
-       }
+       list_for_each_entry(val, &sw_context->resource_list, head)
+               (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
 }
 
 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         * used as a GMR, this will return -ENOMEM.
         */
 
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;
 
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
         */
 
        DRM_INFO("Falling through to VRAM.\n");
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
        return ret;
 }
 
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
 {
-       struct ttm_validate_buffer *entry;
+       struct vmw_validate_buffer *entry;
        int ret;
 
-       list_for_each_entry(entry, &sw_context->validate_nodes, head) {
-               ret = vmw_validate_single_buffer(dev_priv, entry->bo);
+       list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
+               ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
                if (unlikely(ret != 0))
                        return ret;
        }
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 {
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_fence_obj *fence = NULL;
+       struct vmw_resource *error_resource;
+       struct list_head resource_list;
        uint32_t handle;
        void *cmd;
        int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                sw_context->kernel = true;
 
        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
-       sw_context->cid_valid = false;
-       sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;
        sw_context->fence_flags = 0;
-       INIT_LIST_HEAD(&sw_context->query_list);
        INIT_LIST_HEAD(&sw_context->resource_list);
        sw_context->cur_query_bo = dev_priv->pinned_bo;
-       sw_context->cur_query_cid = dev_priv->query_cid;
-       sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
-
+       sw_context->last_query_ctx = NULL;
+       sw_context->needs_post_query_barrier = false;
+       memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
        INIT_LIST_HEAD(&sw_context->validate_nodes);
+       INIT_LIST_HEAD(&sw_context->res_relocations);
+       if (!sw_context->res_ht_initialized) {
+               ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
+               if (unlikely(ret != 0))
+                       goto out_unlock;
+               sw_context->res_ht_initialized = true;
+       }
 
+       INIT_LIST_HEAD(&resource_list);
        ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
                                command_size);
        if (unlikely(ret != 0))
                goto out_err;
 
+       ret = vmw_resources_reserve(sw_context);
+       if (unlikely(ret != 0))
+               goto out_err;
+
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
        if (unlikely(ret != 0))
                goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err;
 
-       vmw_apply_relocations(sw_context);
+       ret = vmw_resources_validate(sw_context);
+       if (unlikely(ret != 0))
+               goto out_err;
 
        if (throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
                                   throttle_us);
 
                if (unlikely(ret != 0))
-                       goto out_throttle;
+                       goto out_err;
        }
 
        cmd = vmw_fifo_reserve(dev_priv, command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
-               goto out_throttle;
+               goto out_err;
        }
 
+       vmw_apply_relocations(sw_context);
        memcpy(cmd, kernel_commands, command_size);
+
+       vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
+       vmw_resource_relocations_free(&sw_context->res_relocations);
+
        vmw_fifo_commit(dev_priv, command_size);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");
 
+       vmw_resource_list_unreserve(&sw_context->resource_list, false);
        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *) fence);
 
+       if (unlikely(dev_priv->pinned_bo != NULL &&
+                    !dev_priv->query_cid_valid))
+               __vmw_execbuf_release_pinned_bo(dev_priv, fence);
+
        vmw_clear_validations(sw_context);
        vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
                                    user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                vmw_fence_obj_unreference(&fence);
        }
 
+       list_splice_init(&sw_context->resource_list, &resource_list);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       /*
+        * Unreference resources outside of the cmdbuf_mutex to
+        * avoid deadlocks in resource destruction paths.
+        */
+       vmw_resource_list_unreference(&resource_list);
+
        return 0;
 
 out_err:
+       vmw_resource_relocations_free(&sw_context->res_relocations);
        vmw_free_relocations(sw_context);
-out_throttle:
-       vmw_query_switch_backoff(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
+       vmw_resource_list_unreserve(&sw_context->resource_list, true);
        vmw_clear_validations(sw_context);
+       if (unlikely(dev_priv->pinned_bo != NULL &&
+                    !dev_priv->query_cid_valid))
+               __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 out_unlock:
+       list_splice_init(&sw_context->resource_list, &resource_list);
+       error_resource = sw_context->error_resource;
+       sw_context->error_resource = NULL;
        mutex_unlock(&dev_priv->cmdbuf_mutex);
+
+       /*
+        * Unreference resources outside of the cmdbuf_mutex to
+        * avoid deadlocks in resource destruction paths.
+        */
+       vmw_resource_list_unreference(&resource_list);
+       if (unlikely(error_resource != NULL))
+               vmw_resource_unreference(&error_resource);
+
        return ret;
 }
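
Both exit paths above splice sw_context->resource_list to a local list while
cmdbuf_mutex is held and only unreference the resources after the unlock,
since a resource destructor may itself need the mutex. A minimal standalone
sketch of that splice-under-lock, free-outside-lock idiom (pthread-based,
hypothetical names):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *shared_list;

    static void drain_outside_lock(void)
    {
            struct node *local;

            pthread_mutex_lock(&lock);
            local = shared_list;  /* splice: steal the whole list under the lock */
            shared_list = NULL;
            pthread_mutex_unlock(&lock);

            while (local) {       /* run destructors with the lock dropped, so a */
                    struct node *n = local;  /* destructor that re-takes the lock */
                    local = n->next;         /* cannot deadlock against us        */
                    free(n);
            }
    }

    int main(void)
    {
            shared_list = calloc(1, sizeof(*shared_list));
            drain_outside_lock();
            return 0;
    }
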
 
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
 
 
 /**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
  * query bo.
  *
  * @dev_priv: The device private structure.
- * @only_on_cid_match: Only flush and unpin if the current active query cid
- * matches @cid.
- * @cid: Optional context id to match.
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
+ * _after_ a query barrier that flushes all queries touching the current
+ * buffer pointed to by @dev_priv->pinned_bo.
  *
  * This function should be used to unpin the pinned query bo, or
  * as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
  *
  * The function will synchronize on the previous query barrier, and will
  * thus not finish until that barrier has executed.
+ *
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
+ * before calling this function.
  */
-void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-                                  bool only_on_cid_match, uint32_t cid)
+void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+                                    struct vmw_fence_obj *fence)
 {
        int ret = 0;
        struct list_head validate_list;
        struct ttm_validate_buffer pinned_val, query_val;
-       struct vmw_fence_obj *fence;
-
-       mutex_lock(&dev_priv->cmdbuf_mutex);
+       struct vmw_fence_obj *lfence = NULL;
 
        if (dev_priv->pinned_bo == NULL)
                goto out_unlock;
 
-       if (only_on_cid_match && cid != dev_priv->query_cid)
-               goto out_unlock;
-
        INIT_LIST_HEAD(&validate_list);
 
-       pinned_val.new_sync_obj_arg = (void *)(unsigned long)
-               DRM_VMW_FENCE_FLAG_EXEC;
        pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
        list_add_tail(&pinned_val.head, &validate_list);
 
-       query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
        query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
        list_add_tail(&query_val.head, &validate_list);
 
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
                goto out_no_reserve;
        }
 
-       ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
-       if (unlikely(ret != 0)) {
-               vmw_execbuf_unpin_panic(dev_priv);
-               goto out_no_emit;
+       if (dev_priv->query_cid_valid) {
+               BUG_ON(fence != NULL);
+               ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+               if (unlikely(ret != 0)) {
+                       vmw_execbuf_unpin_panic(dev_priv);
+                       goto out_no_emit;
+               }
+               dev_priv->query_cid_valid = false;
        }
 
        vmw_bo_pin(dev_priv->pinned_bo, false);
        vmw_bo_pin(dev_priv->dummy_query_bo, false);
        dev_priv->dummy_query_bo_pinned = false;
 
-       (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       if (fence == NULL) {
+               (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
+                                                 NULL);
+               fence = lfence;
+       }
        ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+       if (lfence != NULL)
+               vmw_fence_obj_unreference(&lfence);
 
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
 
 out_unlock:
-       mutex_unlock(&dev_priv->cmdbuf_mutex);
        return;
 
 out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
        ttm_bo_unref(&dev_priv->pinned_bo);
+}
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
+{
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+       if (dev_priv->query_cid_valid)
+               __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
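
The split above follows the usual kernel convention: the double-underscore
variant __vmw_execbuf_release_pinned_bo() assumes cmdbuf_mutex is already
held, while the plain-named function is the locking wrapper. A generic,
kernel-style sketch of that shape (illustrative names only, not driver code):

    struct obj { struct mutex lock; /* ... state guarded by lock ... */ };

    /* Worker: caller must already hold o->lock. */
    static void __do_work(struct obj *o)
    {
            /* ... touch the guarded state ... */
    }

    /* Wrapper: takes the lock, delegates, releases. */
    void do_work(struct obj *o)
    {
            mutex_lock(&o->lock);
            __do_work(o);
            mutex_unlock(&o->lock);
    }
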
 
index bc187fa..c62d20e 100644 (file)
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;
 
-       kfree(ufence);
+       ttm_base_object_kfree(ufence, base);
        /*
         * Free kernel space accounting.
         */
index 7290811..d9fbbe1 100644 (file)
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
        struct drm_vmw_rect *clips = NULL;
        struct drm_mode_object *obj;
        struct vmw_framebuffer *vfb;
+       struct vmw_resource *res;
        uint32_t num_clips;
        int ret;
 
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;
 
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
-                                            &surface);
+       ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+                                             user_surface_converter,
+                                             &res);
        if (ret)
                goto out_no_surface;
 
+       surface = vmw_res_to_srf(res);
        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
index 070fb23..79f7e8e 100644 (file)
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
 
index cb55b7b..87e39f6 100644 (file)
@@ -35,6 +35,7 @@
 #include "svga_escape.h"
 
 #define VMW_MAX_NUM_STREAMS 1
+#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 
 struct vmw_stream {
        struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
        return 0;
 }
 
+
+static bool vmw_overlay_available(const struct vmw_private *dev_priv)
+{
+       return (dev_priv->overlay_priv != NULL &&
+               ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+                VMW_OVERLAY_CAP_MASK));
+}
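
Comparing the masked capabilities for equality with VMW_OVERLAY_CAP_MASK
requires *both* SVGA_FIFO_CAP_VIDEO and SVGA_FIFO_CAP_ESCAPE to be set; a
plain bitwise AND would accept either bit alone. A standalone sketch of the
all-bits-required test, with hypothetical bit values (the real SVGA bits
differ):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CAP_VIDEO  (1u << 0)   /* hypothetical values */
    #define CAP_ESCAPE (1u << 1)
    #define OVERLAY_CAP_MASK (CAP_VIDEO | CAP_ESCAPE)

    static bool overlay_available(uint32_t caps)
    {
            /* equality against the mask == every required bit present */
            return (caps & OVERLAY_CAP_MASK) == OVERLAY_CAP_MASK;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   overlay_available(CAP_VIDEO),               /* 0 */
                   overlay_available(CAP_ESCAPE),              /* 0 */
                   overlay_available(CAP_VIDEO | CAP_ESCAPE)); /* 1 */
            return 0;
    }

This centralized check also replaces the init-time test removed further down,
whose && condition could only fire when VIDEO was missing while ESCAPE was
present.
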
+
 int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
 {
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
        struct vmw_resource *res;
        int ret;
 
-       if (!overlay)
+       if (!vmw_overlay_available(dev_priv))
                return -ENOSYS;
 
        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
 
 int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
 {
-       if (!dev_priv->overlay_priv)
+       if (!vmw_overlay_available(dev_priv))
                return 0;
 
        return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        int i, k;
 
-       if (!overlay)
+       if (!vmw_overlay_available(dev_priv))
                return 0;
 
        mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
        if (dev_priv->overlay_priv)
                return -EINVAL;
 
-       if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
-            (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
-               DRM_INFO("hardware doesn't support overlays\n");
-               return -ENOSYS;
-       }
-
        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return -ENOMEM;
index da3c6b5..e01a17b 100644 (file)
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drmP.h>
-
-struct vmw_user_context {
-       struct ttm_base_object base;
-       struct vmw_resource res;
-};
-
-struct vmw_user_surface {
-       struct ttm_base_object base;
-       struct vmw_surface srf;
-       uint32_t size;
-};
+#include "vmwgfx_resource_priv.h"
 
 struct vmw_user_dma_buffer {
        struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
        struct vmw_stream stream;
 };
 
-struct vmw_surface_offset {
-       uint32_t face;
-       uint32_t mip;
-       uint32_t bo_offset;
-};
-
 
-static uint64_t vmw_user_context_size;
-static uint64_t vmw_user_surface_size;
 static uint64_t vmw_user_stream_size;
 
+static const struct vmw_res_func vmw_stream_func = {
+       .res_type = vmw_res_stream,
+       .needs_backup = false,
+       .may_evict = false,
+       .type_name = "video streams",
+       .backup_placement = NULL,
+       .create = NULL,
+       .destroy = NULL,
+       .bind = NULL,
+       .unbind = NULL
+};
+
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
  *
  * Release the resource id to the resource id manager and set it to -1
  */
-static void vmw_resource_release_id(struct vmw_resource *res)
+void vmw_resource_release_id(struct vmw_resource *res)
 {
        struct vmw_private *dev_priv = res->dev_priv;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
-               idr_remove(res->idr, res->id);
+               idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
 }
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
-       int id = res->id;
-       struct idr *idr = res->idr;
+       int id;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        res->avail = false;
-       if (res->remove_from_lists != NULL)
-               res->remove_from_lists(res);
+       list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
+       if (res->backup) {
+               struct ttm_buffer_object *bo = &res->backup->base;
+
+               ttm_bo_reserve(bo, false, false, false, 0);
+               if (!list_empty(&res->mob_head) &&
+                   res->func->unbind != NULL) {
+                       struct ttm_validate_buffer val_buf;
+
+                       val_buf.bo = bo;
+                       res->func->unbind(res, false, &val_buf);
+               }
+               res->backup_dirty = false;
+               list_del_init(&res->mob_head);
+               ttm_bo_unreserve(bo);
+               vmw_dmabuf_unreference(&res->backup);
+       }
 
        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);
 
+       id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
 /**
  * vmw_resource_alloc_id - allocate a resource id from the id manager.
  *
- * @dev_priv: Pointer to the device private structure.
  * @res: Pointer to the resource.
  *
  * Allocate the lowest free resource from the resource manager, and set
  * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
  */
-static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
-                                struct vmw_resource *res)
+int vmw_resource_alloc_id(struct vmw_resource *res)
 {
+       struct vmw_private *dev_priv = res->dev_priv;
        int ret;
+       struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
        BUG_ON(res->id != -1);
 
        do {
-               if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
+               if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;
 
                write_lock(&dev_priv->resource_lock);
-               ret = idr_get_new_above(res->idr, res, 1, &res->id);
+               ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);
 
        } while (ret == -EAGAIN);
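
The loop above uses the older two-step IDR API: idr_pre_get() preallocates
memory with GFP_KERNEL outside the lock, and idr_get_new_above() can still
return -EAGAIN if a concurrent allocation consumed the preallocation, hence
the retry. A hedged, kernel-style sketch of the generic shape (locking
reduced to comments; not driver code):

    int alloc_id(struct idr *idr, void *obj, int *id)
    {
            int ret;

            do {
                    if (idr_pre_get(idr, GFP_KERNEL) == 0)
                            return -ENOMEM;  /* preallocate, may sleep */

                    /* take the lock protecting the idr */
                    ret = idr_get_new_above(idr, obj, 1, id);
                    /* drop the lock */
            } while (ret == -EAGAIN);  /* raced: preallocation used up */

            return ret;
    }
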
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
        return ret;
 }
 
-
-static int vmw_resource_init(struct vmw_private *dev_priv,
-                            struct vmw_resource *res,
-                            struct idr *idr,
-                            enum ttm_object_type obj_type,
-                            bool delay_id,
-                            void (*res_free) (struct vmw_resource *res),
-                            void (*remove_from_lists)
-                            (struct vmw_resource *res))
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @res:            The struct vmw_resource to initialize.
+ * @obj_type:       Resource object type.
+ * @delay_id:       Boolean whether to defer device id allocation until
+ *                  the first validation.
+ * @res_free:       Resource destructor.
+ * @func:           Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+                     bool delay_id,
+                     void (*res_free) (struct vmw_resource *res),
+                     const struct vmw_res_func *func)
 {
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
-       res->remove_from_lists = remove_from_lists;
-       res->res_type = obj_type;
-       res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;
-       INIT_LIST_HEAD(&res->query_head);
-       INIT_LIST_HEAD(&res->validate_head);
+       res->func = func;
+       INIT_LIST_HEAD(&res->lru_head);
+       INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
+       res->backup = NULL;
+       res->backup_offset = 0;
+       res->backup_dirty = false;
+       res->res_dirty = false;
        if (delay_id)
                return 0;
        else
-               return vmw_resource_alloc_id(dev_priv, res);
+               return vmw_resource_alloc_id(res);
 }
 
 /**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
  * Activate basically means that the function vmw_resource_lookup will
  * find it.
  */
-
-static void vmw_resource_activate(struct vmw_resource *res,
-                                 void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+                          void (*hw_destroy) (struct vmw_resource *))
 {
        struct vmw_private *dev_priv = res->dev_priv;
 
@@ -250,1396 +268,245 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
 }
 
 /**
- * Context management:
+ * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv:     Pointer to a device private struct
+ * @tfile:        Pointer to a struct ttm_object_file identifying the caller
+ * @handle:       The TTM user-space handle
+ * @converter:    Pointer to an object describing the resource type
+ * @p_res:        On successful return the location pointed to will contain
+ *                a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
  */
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
-{
-
-       struct vmw_private *dev_priv = res->dev_priv;
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdDestroyContext body;
-       } *cmd;
-
-
-       vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
-
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "destruction.\n");
-               return;
-       }
-
-       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-       cmd->body.cid = cpu_to_le32(res->id);
-
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-       vmw_3d_resource_dec(dev_priv, false);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
-                           struct vmw_resource *res,
-                           void (*res_free) (struct vmw_resource *res))
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+                                   struct ttm_object_file *tfile,
+                                   uint32_t handle,
+                                   const struct vmw_user_resource_conv
+                                   *converter,
+                                   struct vmw_resource **p_res)
 {
-       int ret;
+       struct ttm_base_object *base;
+       struct vmw_resource *res;
+       int ret = -EINVAL;
 
-       struct {
-               SVGA3dCmdHeader header;
-               SVGA3dCmdDefineContext body;
-       } *cmd;
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL))
+               return -EINVAL;
 
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
-                               VMW_RES_CONTEXT, false, res_free, NULL);
+       if (unlikely(base->object_type != converter->object_type))
+               goto out_bad_resource;
 
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to allocate a resource id.\n");
-               goto out_early;
-       }
+       res = converter->base_obj_to_res(base);
 
-       if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
-               DRM_ERROR("Out of hw context ids.\n");
-               vmw_resource_unreference(&res);
-               return -ENOMEM;
+       read_lock(&dev_priv->resource_lock);
+       if (!res->avail || res->res_free != converter->res_free) {
+               read_unlock(&dev_priv->resource_lock);
+               goto out_bad_resource;
        }
 
-       cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Fifo reserve failed.\n");
-               vmw_resource_unreference(&res);
-               return -ENOMEM;
-       }
+       kref_get(&res->kref);
+       read_unlock(&dev_priv->resource_lock);
 
-       cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-       cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-       cmd->body.cid = cpu_to_le32(res->id);
+       *p_res = res;
+       ret = 0;
 
-       vmw_fifo_commit(dev_priv, sizeof(*cmd));
-       (void) vmw_3d_resource_inc(dev_priv, false);
-       vmw_resource_activate(res, vmw_hw_context_destroy);
-       return 0;
+out_bad_resource:
+       ttm_base_object_unref(&base);
 
-out_early:
-       if (res_free == NULL)
-               kfree(res);
-       else
-               res_free(res);
        return ret;
 }
 
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed at by @out_surf and @out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
 {
-       struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
+       struct vmw_resource *res;
        int ret;
 
-       if (unlikely(res == NULL))
-               return NULL;
-
-       ret = vmw_context_init(dev_priv, res, NULL);
-       return (ret == 0) ? res : NULL;
-}
-
-/**
- * User-space context management:
- */
+       BUG_ON(*out_surf || *out_buf);
 
-static void vmw_user_context_free(struct vmw_resource *res)
-{
-       struct vmw_user_context *ctx =
-           container_of(res, struct vmw_user_context, res);
-       struct vmw_private *dev_priv = res->dev_priv;
+       ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+                                             user_surface_converter,
+                                             &res);
+       if (!ret) {
+               *out_surf = vmw_res_to_srf(res);
+               return 0;
+       }
 
-       kfree(ctx);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                           vmw_user_context_size);
+       *out_surf = NULL;
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
 }
 
 /**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
+ * Buffer management.
  */
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
-       struct ttm_base_object *base = *p_base;
-       struct vmw_user_context *ctx =
-           container_of(base, struct vmw_user_context, base);
-       struct vmw_resource *res = &ctx->res;
+       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
 
-       *p_base = NULL;
-       vmw_resource_unreference(&res);
+       kfree(vmw_bo);
 }
 
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+                   struct vmw_dma_buffer *vmw_bo,
+                   size_t size, struct ttm_placement *placement,
+                   bool interruptible,
+                   void (*bo_free) (struct ttm_buffer_object *bo))
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_resource *res;
-       struct vmw_user_context *ctx;
-       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       int ret = 0;
+       struct ttm_bo_device *bdev = &dev_priv->bdev;
+       size_t acc_size;
+       int ret;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
-       if (unlikely(res == NULL))
-               return -EINVAL;
+       BUG_ON(!bo_free);
 
-       if (res->res_free != &vmw_user_context_free) {
-               ret = -EINVAL;
-               goto out;
-       }
+       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+       memset(vmw_bo, 0, sizeof(*vmw_bo));
 
-       ctx = container_of(res, struct vmw_user_context, res);
-       if (ctx->base.tfile != tfile && !ctx->base.shareable) {
-               ret = -EPERM;
-               goto out;
-       }
+       INIT_LIST_HEAD(&vmw_bo->res_list);
 
-       ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
-       vmw_resource_unreference(&res);
+       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+                         ttm_bo_type_device, placement,
+                         0, interruptible,
+                         NULL, acc_size, NULL, bo_free);
        return ret;
 }
 
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_context *ctx;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
-       struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-       int ret;
-
-
-       /*
-        * Approximate idr memory usage with 128 bytes. It will be limited
-        * by maximum number_of contexts anyway.
-        */
-
-       if (unlikely(vmw_user_context_size == 0))
-               vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
-
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  vmw_user_context_size,
-                                  false, true);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for context"
-                                 " creation.\n");
-               goto out_unlock;
-       }
-
-       ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-       if (unlikely(ctx == NULL)) {
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_user_context_size);
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
-
-       res = &ctx->res;
-       ctx->base.shareable = false;
-       ctx->base.tfile = NULL;
-
-       /*
-        * From here on, the destructor takes over resource freeing.
-        */
-
-       ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
-       if (unlikely(ret != 0))
-               goto out_unlock;
-
-       tmp = vmw_resource_reference(&ctx->res);
-       ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
-                                  &vmw_user_context_base_release, NULL);
-
-       if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               goto out_err;
-       }
-
-       arg->cid = res->id;
-out_err:
-       vmw_resource_unreference(&res);
-out_unlock:
-       ttm_read_unlock(&vmaster->lock);
-       return ret;
+       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
+       ttm_base_object_kfree(vmw_user_bo, base);
 }
 
-int vmw_context_check(struct vmw_private *dev_priv,
-                     struct ttm_object_file *tfile,
-                     int id,
-                     struct vmw_resource **p_res)
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 {
-       struct vmw_resource *res;
-       int ret = 0;
-
-       read_lock(&dev_priv->resource_lock);
-       res = idr_find(&dev_priv->context_idr, id);
-       if (res && res->avail) {
-               struct vmw_user_context *ctx =
-                       container_of(res, struct vmw_user_context, res);
-               if (ctx->base.tfile != tfile && !ctx->base.shareable)
-                       ret = -EPERM;
-               if (p_res)
-                       *p_res = vmw_resource_reference(res);
-       } else
-               ret = -EINVAL;
-       read_unlock(&dev_priv->resource_lock);
-
-       return ret;
-}
+       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct ttm_base_object *base = *p_base;
+       struct ttm_buffer_object *bo;
 
-struct vmw_bpp {
-       uint8_t bpp;
-       uint8_t s_bpp;
-};
+       *p_base = NULL;
 
-/*
- * Size table for the supported SVGA3D surface formats. It consists of
- * two values. The bpp value and the s_bpp value which is short for
- * "stride bits per pixel" The values are given in such a way that the
- * minimum stride for the image is calculated using
- *
- * min_stride = w*s_bpp
- *
- * and the total memory requirement for the image is
- *
- * h*min_stride*bpp/s_bpp
- *
- */
-static const struct vmw_bpp vmw_sf_bpp[] = {
-       [SVGA3D_FORMAT_INVALID] = {0, 0},
-       [SVGA3D_X8R8G8B8] = {32, 32},
-       [SVGA3D_A8R8G8B8] = {32, 32},
-       [SVGA3D_R5G6B5] = {16, 16},
-       [SVGA3D_X1R5G5B5] = {16, 16},
-       [SVGA3D_A1R5G5B5] = {16, 16},
-       [SVGA3D_A4R4G4B4] = {16, 16},
-       [SVGA3D_Z_D32] = {32, 32},
-       [SVGA3D_Z_D16] = {16, 16},
-       [SVGA3D_Z_D24S8] = {32, 32},
-       [SVGA3D_Z_D15S1] = {16, 16},
-       [SVGA3D_LUMINANCE8] = {8, 8},
-       [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
-       [SVGA3D_LUMINANCE16] = {16, 16},
-       [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
-       [SVGA3D_DXT1] = {4, 16},
-       [SVGA3D_DXT2] = {8, 32},
-       [SVGA3D_DXT3] = {8, 32},
-       [SVGA3D_DXT4] = {8, 32},
-       [SVGA3D_DXT5] = {8, 32},
-       [SVGA3D_BUMPU8V8] = {16, 16},
-       [SVGA3D_BUMPL6V5U5] = {16, 16},
-       [SVGA3D_BUMPX8L8V8U8] = {32, 32},
-       [SVGA3D_ARGB_S10E5] = {16, 16},
-       [SVGA3D_ARGB_S23E8] = {32, 32},
-       [SVGA3D_A2R10G10B10] = {32, 32},
-       [SVGA3D_V8U8] = {16, 16},
-       [SVGA3D_Q8W8V8U8] = {32, 32},
-       [SVGA3D_CxV8U8] = {16, 16},
-       [SVGA3D_X8L8V8U8] = {32, 32},
-       [SVGA3D_A2W10V10U10] = {32, 32},
-       [SVGA3D_ALPHA8] = {8, 8},
-       [SVGA3D_R_S10E5] = {16, 16},
-       [SVGA3D_R_S23E8] = {32, 32},
-       [SVGA3D_RG_S10E5] = {16, 16},
-       [SVGA3D_RG_S23E8] = {32, 32},
-       [SVGA3D_BUFFER] = {8, 8},
-       [SVGA3D_Z_D24X8] = {32, 32},
-       [SVGA3D_V16U16] = {32, 32},
-       [SVGA3D_G16R16] = {32, 32},
-       [SVGA3D_A16B16G16R16] = {64,  64},
-       [SVGA3D_UYVY] = {12, 12},
-       [SVGA3D_YUY2] = {12, 12},
-       [SVGA3D_NV12] = {12, 8},
-       [SVGA3D_AYUV] = {32, 32},
-       [SVGA3D_BC4_UNORM] = {4,  16},
-       [SVGA3D_BC5_UNORM] = {8,  32},
-       [SVGA3D_Z_DF16] = {16,  16},
-       [SVGA3D_Z_DF24] = {24,  24},
-       [SVGA3D_Z_D24S8_INT] = {32,  32}
-};
+       if (unlikely(base == NULL))
+               return;
 
+       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+       bo = &vmw_user_bo->dma.base;
+       ttm_bo_unref(&bo);
+}
 
 /**
- * Surface management.
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct device private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
  */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+                         struct ttm_object_file *tfile,
+                         uint32_t size,
+                         bool shareable,
+                         uint32_t *handle,
+                         struct vmw_dma_buffer **p_dma_buf)
+{
+       struct vmw_user_dma_buffer *user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
 
-struct vmw_surface_dma {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdSurfaceDMA body;
-       SVGA3dCopyBox cb;
-       SVGA3dCmdSurfaceDMASuffix suffix;
-};
+       user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+       if (unlikely(user_bo == NULL)) {
+               DRM_ERROR("Failed to allocate a buffer.\n");
+               return -ENOMEM;
+       }
 
-struct vmw_surface_define {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdDefineSurface body;
-};
+       ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+                             &vmw_vram_sys_placement, true,
+                             &vmw_user_dmabuf_destroy);
+       if (unlikely(ret != 0))
+               return ret;
 
-struct vmw_surface_destroy {
-       SVGA3dCmdHeader header;
-       SVGA3dCmdDestroySurface body;
-};
+       tmp = ttm_bo_reference(&user_bo->dma.base);
+       ret = ttm_base_object_init(tfile,
+                                  &user_bo->base,
+                                  shareable,
+                                  ttm_buffer_type,
+                                  &vmw_user_dmabuf_release, NULL);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unref(&tmp);
+               goto out_no_base_object;
+       }
 
+       *p_dma_buf = &user_bo->dma;
+       *handle = user_bo->base.hash.key;
 
-/**
- * vmw_surface_dma_size - Compute fifo size for a dma command.
- *
- * @srf: Pointer to a struct vmw_surface
- *
- * Computes the required size for a surface dma command for backup or
- * restoration of the surface represented by @srf.
- */
-static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
-{
-       return srf->num_sizes * sizeof(struct vmw_surface_dma);
+out_no_base_object:
+       return ret;
 }
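
A hedged usage sketch for the new allocator, using only names from this
patch (size is the requested byte count; error handling abbreviated):

    struct vmw_dma_buffer *dma_buf;
    uint32_t handle;
    int ret;

    ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size,
                                false /* not shareable */,
                                &handle, &dma_buf);
    if (ret != 0)
            return ret;

    /* handle is returned to user space; dma_buf is a refcounted kernel
     * pointer, dropped with vmw_dmabuf_unreference(&dma_buf) when done. */
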
 
-
 /**
- * vmw_surface_define_size - Compute fifo size for a surface define command.
- *
- * @srf: Pointer to a struct vmw_surface
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
  *
- * Computes the required size for a surface define command for the definition
- * of the surface represented by @srf.
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
  */
-static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+                                 struct ttm_object_file *tfile)
 {
-       return sizeof(struct vmw_surface_define) + srf->num_sizes *
-               sizeof(SVGA3dSize);
-}
+       struct vmw_user_dma_buffer *vmw_user_bo;
 
+       if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+               return -EPERM;
 
-/**
- * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
- *
- * Computes the required size for a surface destroy command for the destruction
- * of a hw surface.
- */
-static inline uint32_t vmw_surface_destroy_size(void)
-{
-       return sizeof(struct vmw_surface_destroy);
+       vmw_user_bo = vmw_user_dma_buffer(bo);
+       return (vmw_user_bo->base.tfile == tfile ||
+               vmw_user_bo->base.shareable) ? 0 : -EPERM;
 }
 
-/**
- * vmw_surface_destroy_encode - Encode a surface_destroy command.
- *
- * @id: The surface id
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_destroy_encode(uint32_t id,
-                                      void *cmd_space)
+int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
 {
-       struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
-               cmd_space;
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_alloc_dmabuf_arg *arg =
+           (union drm_vmw_alloc_dmabuf_arg *)data;
+       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+       struct vmw_dma_buffer *dma_buf;
+       uint32_t handle;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       int ret;
 
-       cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
-       cmd->header.size = sizeof(cmd->body);
-       cmd->body.sid = id;
-}
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
 
-/**
- * vmw_surface_define_encode - Encode a surface_define command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- */
-static void vmw_surface_define_encode(const struct vmw_surface *srf,
-                                     void *cmd_space)
-{
-       struct vmw_surface_define *cmd = (struct vmw_surface_define *)
-               cmd_space;
-       struct drm_vmw_size *src_size;
-       SVGA3dSize *cmd_size;
-       uint32_t cmd_len;
-       int i;
-
-       cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
-       cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
-       cmd->header.size = cmd_len;
-       cmd->body.sid = srf->res.id;
-       cmd->body.surfaceFlags = srf->flags;
-       cmd->body.format = cpu_to_le32(srf->format);
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-               cmd->body.face[i].numMipLevels = srf->mip_levels[i];
-
-       cmd += 1;
-       cmd_size = (SVGA3dSize *) cmd;
-       src_size = srf->sizes;
-
-       for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
-               cmd_size->width = src_size->width;
-               cmd_size->height = src_size->height;
-               cmd_size->depth = src_size->depth;
-       }
-}
-
-
-/**
- * vmw_surface_dma_encode - Encode a surface_dma command.
- *
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
- * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
- * should be placed or read from.
- * @to_surface: Boolean whether to DMA to the surface or from the surface.
- */
-static void vmw_surface_dma_encode(struct vmw_surface *srf,
-                                  void *cmd_space,
-                                  const SVGAGuestPtr *ptr,
-                                  bool to_surface)
-{
-       uint32_t i;
-       uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
-       uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-       struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
-
-       for (i = 0; i < srf->num_sizes; ++i) {
-               SVGA3dCmdHeader *header = &cmd->header;
-               SVGA3dCmdSurfaceDMA *body = &cmd->body;
-               SVGA3dCopyBox *cb = &cmd->cb;
-               SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
-               const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
-               const struct drm_vmw_size *cur_size = &srf->sizes[i];
-
-               header->id = SVGA_3D_CMD_SURFACE_DMA;
-               header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
-
-               body->guest.ptr = *ptr;
-               body->guest.ptr.offset += cur_offset->bo_offset;
-               body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
-               body->host.sid = srf->res.id;
-               body->host.face = cur_offset->face;
-               body->host.mipmap = cur_offset->mip;
-               body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
-                                 SVGA3D_READ_HOST_VRAM);
-               cb->x = 0;
-               cb->y = 0;
-               cb->z = 0;
-               cb->srcx = 0;
-               cb->srcy = 0;
-               cb->srcz = 0;
-               cb->w = cur_size->width;
-               cb->h = cur_size->height;
-               cb->d = cur_size->depth;
-
-               suffix->suffixSize = sizeof(*suffix);
-               suffix->maximumOffset = body->guest.pitch*cur_size->height*
-                       cur_size->depth*bpp / stride_bpp;
-               suffix->flags.discard = 0;
-               suffix->flags.unsynchronized = 0;
-               suffix->flags.reserved = 0;
-               ++cmd;
-       }
-};
-
-
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
-{
-
-       struct vmw_private *dev_priv = res->dev_priv;
-       struct vmw_surface *srf;
-       void *cmd;
-
-       if (res->id != -1) {
-
-               cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-               if (unlikely(cmd == NULL)) {
-                       DRM_ERROR("Failed reserving FIFO space for surface "
-                                 "destruction.\n");
-                       return;
-               }
-
-               vmw_surface_destroy_encode(res->id, cmd);
-               vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
-
-               /*
-                * used_memory_size_atomic, or separate lock
-                * to avoid taking dev_priv::cmdbuf_mutex in
-                * the destroy path.
-                */
-
-               mutex_lock(&dev_priv->cmdbuf_mutex);
-               srf = container_of(res, struct vmw_surface, res);
-               dev_priv->used_memory_size -= srf->backup_size;
-               mutex_unlock(&dev_priv->cmdbuf_mutex);
-
-       }
-       vmw_3d_resource_dec(dev_priv, false);
-}
-
-void vmw_surface_res_free(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-       if (srf->backup)
-               ttm_bo_unref(&srf->backup);
-       kfree(srf->offsets);
-       kfree(srf->sizes);
-       kfree(srf->snooper.image);
-       kfree(srf);
-}
-
-
-/**
- * vmw_surface_do_validate - make a surface available to the device.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * If the surface doesn't have a hw id, allocate one, and optionally
- * DMA the backed up surface contents to the device.
- *
- * Returns -EBUSY if there wasn't sufficient device resources to
- * complete the validation. Retry after freeing up resources.
- *
- * May return other errors if the kernel is out of guest resources.
- */
-int vmw_surface_do_validate(struct vmw_private *dev_priv,
-                           struct vmw_surface *srf)
-{
-       struct vmw_resource *res = &srf->res;
-       struct list_head val_list;
-       struct ttm_validate_buffer val_buf;
-       uint32_t submit_size;
-       uint8_t *cmd;
-       int ret;
-
-       if (likely(res->id != -1))
-               return 0;
-
-       if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
-                    dev_priv->memory_size))
-               return -EBUSY;
-
-       /*
-        * Reserve- and validate the backup DMA bo.
-        */
-
-       if (srf->backup) {
-               INIT_LIST_HEAD(&val_list);
-               val_buf.bo = ttm_bo_reference(srf->backup);
-               val_buf.new_sync_obj_arg = (void *)((unsigned long)
-                                                   DRM_VMW_FENCE_FLAG_EXEC);
-               list_add_tail(&val_buf.head, &val_list);
-               ret = ttm_eu_reserve_buffers(&val_list);
-               if (unlikely(ret != 0))
-                       goto out_no_reserve;
-
-               ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-                                     true, false, false);
-               if (unlikely(ret != 0))
-                       goto out_no_validate;
-       }
-
-       /*
-        * Alloc id for the resource.
-        */
-
-       ret = vmw_resource_alloc_id(dev_priv, res);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to allocate a surface id.\n");
-               goto out_no_id;
-       }
-       if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
-               ret = -EBUSY;
-               goto out_no_fifo;
-       }
-
-
-       /*
-        * Encode surface define- and dma commands.
-        */
-
-       submit_size = vmw_surface_define_size(srf);
-       if (srf->backup)
-               submit_size += vmw_surface_dma_size(srf);
-
-       cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "validation.\n");
-               ret = -ENOMEM;
-               goto out_no_fifo;
-       }
-
-       vmw_surface_define_encode(srf, cmd);
-       if (srf->backup) {
-               SVGAGuestPtr ptr;
-
-               cmd += vmw_surface_define_size(srf);
-               vmw_bo_get_guest_ptr(srf->backup, &ptr);
-               vmw_surface_dma_encode(srf, cmd, &ptr, true);
-       }
-
-       vmw_fifo_commit(dev_priv, submit_size);
-
-       /*
-        * Create a fence object and fence the backup buffer.
-        */
-
-       if (srf->backup) {
-               struct vmw_fence_obj *fence;
-
-               (void) vmw_execbuf_fence_commands(NULL, dev_priv,
-                                                 &fence, NULL);
-               ttm_eu_fence_buffer_objects(&val_list, fence);
-               if (likely(fence != NULL))
-                       vmw_fence_obj_unreference(&fence);
-               ttm_bo_unref(&val_buf.bo);
-               ttm_bo_unref(&srf->backup);
-       }
-
-       /*
-        * Surface memory usage accounting.
-        */
-
-       dev_priv->used_memory_size += srf->backup_size;
-
-       return 0;
-
-out_no_fifo:
-       vmw_resource_release_id(res);
-out_no_id:
-out_no_validate:
-       if (srf->backup)
-               ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-       if (srf->backup)
-               ttm_bo_unref(&val_buf.bo);
-       return ret;
-}
-
-/**
- * vmw_surface_evict - Evict a hw surface.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface
- *
- * DMA the contents of a hw surface to a backup guest buffer object,
- * and destroy the hw surface, releasing its id.
- */
-int vmw_surface_evict(struct vmw_private *dev_priv,
-                     struct vmw_surface *srf)
-{
-       struct vmw_resource *res = &srf->res;
-       struct list_head val_list;
-       struct ttm_validate_buffer val_buf;
-       uint32_t submit_size;
-       uint8_t *cmd;
-       int ret;
-       struct vmw_fence_obj *fence;
-       SVGAGuestPtr ptr;
-
-       BUG_ON(res->id == -1);
-
-       /*
-        * Create a surface backup buffer object.
-        */
-
-       if (!srf->backup) {
-               ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
-                                   ttm_bo_type_device,
-                                   &vmw_srf_placement, 0, 0, true,
-                                   NULL, &srf->backup);
-               if (unlikely(ret != 0))
-                       return ret;
-       }
-
-       /*
-        * Reserve- and validate the backup DMA bo.
-        */
-
-       INIT_LIST_HEAD(&val_list);
-       val_buf.bo = ttm_bo_reference(srf->backup);
-       val_buf.new_sync_obj_arg = (void *)(unsigned long)
-               DRM_VMW_FENCE_FLAG_EXEC;
-       list_add_tail(&val_buf.head, &val_list);
-       ret = ttm_eu_reserve_buffers(&val_list);
-       if (unlikely(ret != 0))
-               goto out_no_reserve;
-
-       ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
-                             true, false, false);
-       if (unlikely(ret != 0))
-               goto out_no_validate;
-
-
-       /*
-        * Encode the dma- and surface destroy commands.
-        */
-
-       submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
-       cmd = vmw_fifo_reserve(dev_priv, submit_size);
-       if (unlikely(cmd == NULL)) {
-               DRM_ERROR("Failed reserving FIFO space for surface "
-                         "eviction.\n");
-               ret = -ENOMEM;
-               goto out_no_fifo;
-       }
-
-       vmw_bo_get_guest_ptr(srf->backup, &ptr);
-       vmw_surface_dma_encode(srf, cmd, &ptr, false);
-       cmd += vmw_surface_dma_size(srf);
-       vmw_surface_destroy_encode(res->id, cmd);
-       vmw_fifo_commit(dev_priv, submit_size);
-
-       /*
-        * Surface memory usage accounting.
-        */
-
-       dev_priv->used_memory_size -= srf->backup_size;
-
-       /*
-        * Create a fence object and fence the DMA buffer.
-        */
-
-       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
-                                         &fence, NULL);
-       ttm_eu_fence_buffer_objects(&val_list, fence);
-       if (likely(fence != NULL))
-               vmw_fence_obj_unreference(&fence);
-       ttm_bo_unref(&val_buf.bo);
-
-       /*
-        * Release the surface ID.
-        */
-
-       vmw_resource_release_id(res);
-
-       return 0;
-
-out_no_fifo:
-out_no_validate:
-       if (srf->backup)
-               ttm_eu_backoff_reservation(&val_list);
-out_no_reserve:
-       ttm_bo_unref(&val_buf.bo);
-       ttm_bo_unref(&srf->backup);
-       return ret;
-}
-
-
-/**
- * vmw_surface_validate - make a surface available to the device, evicting
- * other surfaces if needed.
- *
- * @dev_priv: Pointer to a device private struct.
- * @srf: Pointer to a struct vmw_surface.
- *
- * Try to validate a surface and if it fails due to limited device resources,
- * repeatedly try to evict other surfaces until the request can be
- * acommodated.
- *
- * May return errors if out of resources.
- */
-int vmw_surface_validate(struct vmw_private *dev_priv,
-                        struct vmw_surface *srf)
-{
-       int ret;
-       struct vmw_surface *evict_srf;
-
-       do {
-               write_lock(&dev_priv->resource_lock);
-               list_del_init(&srf->lru_head);
-               write_unlock(&dev_priv->resource_lock);
-
-               ret = vmw_surface_do_validate(dev_priv, srf);
-               if (likely(ret != -EBUSY))
-                       break;
-
-               write_lock(&dev_priv->resource_lock);
-               if (list_empty(&dev_priv->surface_lru)) {
-                       DRM_ERROR("Out of device memory for surfaces.\n");
-                       ret = -EBUSY;
-                       write_unlock(&dev_priv->resource_lock);
-                       break;
-               }
-
-               evict_srf = vmw_surface_reference
-                       (list_first_entry(&dev_priv->surface_lru,
-                                         struct vmw_surface,
-                                         lru_head));
-               list_del_init(&evict_srf->lru_head);
-
-               write_unlock(&dev_priv->resource_lock);
-               (void) vmw_surface_evict(dev_priv, evict_srf);
-
-               vmw_surface_unreference(&evict_srf);
-
-       } while (1);
-
-       if (unlikely(ret != 0 && srf->res.id != -1)) {
-               write_lock(&dev_priv->resource_lock);
-               list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
-               write_unlock(&dev_priv->resource_lock);
-       }
-
-       return ret;
-}
-
-
-/**
- * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
- *
- * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
- *
- * As part of the resource destruction, remove the surface from any
- * lookup lists.
- */
-static void vmw_surface_remove_from_lists(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
-       list_del_init(&srf->lru_head);
-}
-
-int vmw_surface_init(struct vmw_private *dev_priv,
-                    struct vmw_surface *srf,
-                    void (*res_free) (struct vmw_resource *res))
-{
-       int ret;
-       struct vmw_resource *res = &srf->res;
-
-       BUG_ON(res_free == NULL);
-       INIT_LIST_HEAD(&srf->lru_head);
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
-                               VMW_RES_SURFACE, true, res_free,
-                               vmw_surface_remove_from_lists);
-
-       if (unlikely(ret != 0))
-               res_free(res);
-
-       /*
-        * The surface won't be visible to hardware until a
-        * surface validate.
-        */
-
-       (void) vmw_3d_resource_inc(dev_priv, false);
-       vmw_resource_activate(res, vmw_hw_surface_destroy);
-       return ret;
-}
-
-static void vmw_user_surface_free(struct vmw_resource *res)
-{
-       struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-       struct vmw_user_surface *user_srf =
-           container_of(srf, struct vmw_user_surface, srf);
-       struct vmw_private *dev_priv = srf->res.dev_priv;
-       uint32_t size = user_srf->size;
-
-       if (srf->backup)
-               ttm_bo_unref(&srf->backup);
-       kfree(srf->offsets);
-       kfree(srf->sizes);
-       kfree(srf->snooper.image);
-       kfree(user_srf);
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-}
-
-/**
- * vmw_resource_unreserve - unreserve resources previously reserved for
- * command submission.
- *
- * @list_head: list of resources to unreserve.
- *
- * Currently only surfaces are considered, and unreserving a surface
- * means putting it back on the device's surface lru list,
- * so that it can be evicted if necessary.
- * This function traverses the resource list and
- * checks whether resources are surfaces, and in that case puts them back
- * on the device's surface LRU list.
- */
-void vmw_resource_unreserve(struct list_head *list)
-{
-       struct vmw_resource *res;
-       struct vmw_surface *srf;
-       rwlock_t *lock = NULL;
-
-       list_for_each_entry(res, list, validate_head) {
-
-               if (res->res_free != &vmw_surface_res_free &&
-                   res->res_free != &vmw_user_surface_free)
-                       continue;
-
-               if (unlikely(lock == NULL)) {
-                       lock = &res->dev_priv->resource_lock;
-                       write_lock(lock);
-               }
-
-               srf = container_of(res, struct vmw_surface, res);
-               list_del_init(&srf->lru_head);
-               list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
-       }
-
-       if (lock != NULL)
-               write_unlock(lock);
-}
-
-/**
- * Helper function that looks either a surface or dmabuf.
- *
- * The pointer this pointed at by out_surf and out_buf needs to be null.
- */
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
-                          struct ttm_object_file *tfile,
-                          uint32_t handle,
-                          struct vmw_surface **out_surf,
-                          struct vmw_dma_buffer **out_buf)
-{
-       int ret;
-
-       BUG_ON(*out_surf || *out_buf);
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
-       if (!ret)
-               return 0;
-
-       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
-       return ret;
-}
-
-
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-                                  struct ttm_object_file *tfile,
-                                  uint32_t handle, struct vmw_surface **out)
-{
-       struct vmw_resource *res;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
-       struct ttm_base_object *base;
-       int ret = -EINVAL;
-
-       base = ttm_base_object_lookup(tfile, handle);
-       if (unlikely(base == NULL))
-               return -EINVAL;
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_resource;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       srf = &user_srf->srf;
-       res = &srf->res;
-
-       read_lock(&dev_priv->resource_lock);
-
-       if (!res->avail || res->res_free != &vmw_user_surface_free) {
-               read_unlock(&dev_priv->resource_lock);
-               goto out_bad_resource;
-       }
-
-       kref_get(&res->kref);
-       read_unlock(&dev_priv->resource_lock);
-
-       *out = srf;
-       ret = 0;
-
-out_bad_resource:
-       ttm_base_object_unref(&base);
-
-       return ret;
-}
-
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
-{
-       struct ttm_base_object *base = *p_base;
-       struct vmw_user_surface *user_srf =
-           container_of(base, struct vmw_user_surface, base);
-       struct vmw_resource *res = &user_srf->srf.res;
-
-       *p_base = NULL;
-       vmw_resource_unreference(&res);
-}
-
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file_priv)
-{
-       struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
-       return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
-}
-
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_user_surface *user_srf;
-       struct vmw_surface *srf;
-       struct vmw_resource *res;
-       struct vmw_resource *tmp;
-       union drm_vmw_surface_create_arg *arg =
-           (union drm_vmw_surface_create_arg *)data;
-       struct drm_vmw_surface_create_req *req = &arg->req;
-       struct drm_vmw_surface_arg *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct drm_vmw_size __user *user_sizes;
-       int ret;
-       int i, j;
-       uint32_t cur_bo_offset;
-       struct drm_vmw_size *cur_size;
-       struct vmw_surface_offset *cur_offset;
-       uint32_t stride_bpp;
-       uint32_t bpp;
-       uint32_t num_sizes;
-       uint32_t size;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-
-       if (unlikely(vmw_user_surface_size == 0))
-               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
-                       128;
-
-       num_sizes = 0;
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-               num_sizes += req->mip_levels[i];
-
-       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-           DRM_VMW_MAX_MIP_LEVELS)
-               return -EINVAL;
-
-       size = vmw_user_surface_size + 128 +
-               ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
-               ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
-
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-                                  size, false, true);
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS)
-                       DRM_ERROR("Out of graphics memory for surface"
-                                 " creation.\n");
-               goto out_unlock;
-       }
-
-       user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
-       if (unlikely(user_srf == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_user_srf;
-       }
-
-       srf = &user_srf->srf;
-       res = &srf->res;
-
-       srf->flags = req->flags;
-       srf->format = req->format;
-       srf->scanout = req->scanout;
-       srf->backup = NULL;
-
-       memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-       srf->num_sizes = num_sizes;
-       user_srf->size = size;
-
-       srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-       if (unlikely(srf->sizes == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_sizes;
-       }
-       srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-                              GFP_KERNEL);
-       if (unlikely(srf->sizes == NULL)) {
-               ret = -ENOMEM;
-               goto out_no_offsets;
-       }
-
-       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-           req->size_addr;
-
-       ret = copy_from_user(srf->sizes, user_sizes,
-                            srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
-               ret = -EFAULT;
-               goto out_no_copy;
-       }
-
-       cur_bo_offset = 0;
-       cur_offset = srf->offsets;
-       cur_size = srf->sizes;
-
-       bpp = vmw_sf_bpp[srf->format].bpp;
-       stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
-
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
-               for (j = 0; j < srf->mip_levels[i]; ++j) {
-                       uint32_t stride =
-                               (cur_size->width * stride_bpp + 7) >> 3;
-
-                       cur_offset->face = i;
-                       cur_offset->mip = j;
-                       cur_offset->bo_offset = cur_bo_offset;
-                       cur_bo_offset += stride * cur_size->height *
-                               cur_size->depth * bpp / stride_bpp;
-                       ++cur_offset;
-                       ++cur_size;
-               }
-       }
-       srf->backup_size = cur_bo_offset;
-
-       if (srf->scanout &&
-           srf->num_sizes == 1 &&
-           srf->sizes[0].width == 64 &&
-           srf->sizes[0].height == 64 &&
-           srf->format == SVGA3D_A8R8G8B8) {
-
-               /* allocate image area and clear it */
-               srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
-               if (!srf->snooper.image) {
-                       DRM_ERROR("Failed to allocate cursor_image\n");
-                       ret = -ENOMEM;
-                       goto out_no_copy;
-               }
-       } else {
-               srf->snooper.image = NULL;
-       }
-       srf->snooper.crtc = NULL;
-
-       user_srf->base.shareable = false;
-       user_srf->base.tfile = NULL;
-
-       /**
-        * From this point, the generic resource management functions
-        * destroy the object on failure.
-        */
-
-       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
-       if (unlikely(ret != 0))
-               goto out_unlock;
-
-       tmp = vmw_resource_reference(&srf->res);
-       ret = ttm_base_object_init(tfile, &user_srf->base,
-                                  req->shareable, VMW_RES_SURFACE,
-                                  &vmw_user_surface_base_release, NULL);
-
-       if (unlikely(ret != 0)) {
-               vmw_resource_unreference(&tmp);
-               vmw_resource_unreference(&res);
-               goto out_unlock;
-       }
-
-       rep->sid = user_srf->base.hash.key;
-       if (rep->sid == SVGA3D_INVALID_ID)
-               DRM_ERROR("Created bad Surface ID.\n");
-
-       vmw_resource_unreference(&res);
-
-       ttm_read_unlock(&vmaster->lock);
-       return 0;
-out_no_copy:
-       kfree(srf->offsets);
-out_no_offsets:
-       kfree(srf->sizes);
-out_no_sizes:
-       kfree(user_srf);
-out_no_user_srf:
-       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
-out_unlock:
-       ttm_read_unlock(&vmaster->lock);
-       return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv)
-{
-       union drm_vmw_surface_reference_arg *arg =
-           (union drm_vmw_surface_reference_arg *)data;
-       struct drm_vmw_surface_arg *req = &arg->req;
-       struct drm_vmw_surface_create_req *rep = &arg->rep;
-       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
-       struct drm_vmw_size __user *user_sizes;
-       struct ttm_base_object *base;
-       int ret = -EINVAL;
-
-       base = ttm_base_object_lookup(tfile, req->sid);
-       if (unlikely(base == NULL)) {
-               DRM_ERROR("Could not find surface to reference.\n");
-               return -EINVAL;
-       }
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_resource;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       srf = &user_srf->srf;
-
-       ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Could not add a reference to a surface.\n");
-               goto out_no_reference;
-       }
-
-       rep->flags = srf->flags;
-       rep->format = srf->format;
-       memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
-       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-           rep->size_addr;
-
-       if (user_sizes)
-               ret = copy_to_user(user_sizes, srf->sizes,
-                                  srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("copy_to_user failed %p %u\n",
-                         user_sizes, srf->num_sizes);
-               ret = -EFAULT;
-       }
-out_bad_resource:
-out_no_reference:
-       ttm_base_object_unref(&base);
-
-       return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
-                     struct ttm_object_file *tfile,
-                     uint32_t handle, int *id)
-{
-       struct ttm_base_object *base;
-       struct vmw_user_surface *user_srf;
-
-       int ret = -EPERM;
-
-       base = ttm_base_object_lookup(tfile, handle);
-       if (unlikely(base == NULL))
-               return -EINVAL;
-
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
-               goto out_bad_surface;
-
-       user_srf = container_of(base, struct vmw_user_surface, base);
-       *id = user_srf->srf.res.id;
-       ret = 0;
-
-out_bad_surface:
-       /**
-        * FIXME: May deadlock here when called from the
-        * command parsing code.
-        */
-
-       ttm_base_object_unref(&base);
-       return ret;
-}
-
-/**
- * Buffer management.
- */
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       kfree(vmw_bo);
-}
-
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                   struct vmw_dma_buffer *vmw_bo,
-                   size_t size, struct ttm_placement *placement,
-                   bool interruptible,
-                   void (*bo_free) (struct ttm_buffer_object *bo))
-{
-       struct ttm_bo_device *bdev = &dev_priv->bdev;
-       size_t acc_size;
-       int ret;
-
-       BUG_ON(!bo_free);
-
-       acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
-       memset(vmw_bo, 0, sizeof(*vmw_bo));
-
-       INIT_LIST_HEAD(&vmw_bo->validate_list);
-
-       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
-                         ttm_bo_type_device, placement,
-                         0, 0, interruptible,
-                         NULL, acc_size, NULL, bo_free);
-       return ret;
-}
-
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-       kfree(vmw_user_bo);
-}
-
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
-{
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_base_object *base = *p_base;
-       struct ttm_buffer_object *bo;
-
-       *p_base = NULL;
-
-       if (unlikely(base == NULL))
-               return;
-
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
-       bo = &vmw_user_bo->dma.base;
-       ttm_bo_unref(&bo);
-}
-
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
-{
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       union drm_vmw_alloc_dmabuf_arg *arg =
-           (union drm_vmw_alloc_dmabuf_arg *)data;
-       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
-       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-       struct vmw_user_dma_buffer *vmw_user_bo;
-       struct ttm_buffer_object *tmp;
-       struct vmw_master *vmaster = vmw_master(file_priv->master);
-       int ret;
-
-       vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-       if (unlikely(vmw_user_bo == NULL))
-               return -ENOMEM;
-
-       ret = ttm_read_lock(&vmaster->lock, true);
-       if (unlikely(ret != 0)) {
-               kfree(vmw_user_bo);
-               return ret;
-       }
-
-       ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
-                             &vmw_vram_sys_placement, true,
-                             &vmw_user_dmabuf_destroy);
-       if (unlikely(ret != 0))
-               goto out_no_dmabuf;
-
-       tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-       ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-                                  &vmw_user_bo->base,
-                                  false,
-                                  ttm_buffer_type,
-                                  &vmw_user_dmabuf_release, NULL);
-       if (unlikely(ret != 0))
-               goto out_no_base_object;
-       else {
-               rep->handle = vmw_user_bo->base.hash.key;
-               rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
-               rep->cur_gmr_id = vmw_user_bo->base.hash.key;
-               rep->cur_gmr_offset = 0;
-       }
+       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                                   req->size, false, &handle, &dma_buf);
+       if (unlikely(ret != 0))
+               goto out_no_dmabuf;
+
+       rep->handle = handle;
+       rep->map_handle = dma_buf->base.addr_space_offset;
+       rep->cur_gmr_id = handle;
+       rep->cur_gmr_offset = 0;
+
+       vmw_dmabuf_unreference(&dma_buf);
 
-out_no_base_object:
-       ttm_bo_unref(&tmp);
 out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
 
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                         TTM_REF_USAGE);
 }
 
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-                                 uint32_t cur_validate_node)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       if (likely(vmw_bo->on_validate_list))
-               return vmw_bo->cur_validate_node;
-
-       vmw_bo->cur_validate_node = cur_validate_node;
-       vmw_bo->on_validate_list = true;
-
-       return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
-       vmw_bo->on_validate_list = false;
-}
-
 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
 {
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
        return 0;
 }
 
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+                             struct vmw_dma_buffer *dma_buf)
+{
+       struct vmw_user_dma_buffer *user_bo;
+
+       if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+               return -EINVAL;
+
+       user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+       return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+}
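
Only buffers whose destroy callback is vmw_user_dmabuf_destroy may be referenced this way, mirroring the verify_access check earlier in this patch. A hedged usage sketch, with error handling abbreviated:

    /*
     * Look up a dma buffer on behalf of a client, then add a TTM ref
     * object so the client's handle stays valid. Sketch only; real
     * callers also propagate and unwind errors.
     */
    struct vmw_dma_buffer *buf;
    int ret;

    ret = vmw_user_dmabuf_lookup(tfile, handle, &buf);
    if (ret == 0) {
            ret = vmw_user_dmabuf_reference(tfile, buf);
            vmw_dmabuf_unreference(&buf);
    }
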
+
 /*
  * Stream management
  */
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
        struct vmw_resource *res = &stream->res;
        int ret;
 
-       ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
-                               VMW_RES_STREAM, false, res_free, NULL);
+       ret = vmw_resource_init(dev_priv, res, false, res_free,
+                               &vmw_stream_func);
 
        if (unlikely(ret != 0)) {
                if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
        return 0;
 }
 
-/**
- * User-space context management:
- */
-
 static void vmw_user_stream_free(struct vmw_resource *res)
 {
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;
 
-       kfree(stream);
+       ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
 }
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+       res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;
 
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
        struct vmw_resource *res;
        int ret;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+       res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+                                 *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;
 
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
 }
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+                                 bool interruptible)
+{
+       unsigned long size =
+               (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+       struct vmw_dma_buffer *backup;
+       int ret;
+
+       if (likely(res->backup)) {
+               BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+               return 0;
+       }
+
+       backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+       if (unlikely(backup == NULL))
+               return -ENOMEM;
+
+       ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+                             res->func->backup_placement,
+                             interruptible,
+                             &vmw_dmabuf_bo_free);
+       if (unlikely(ret != 0))
+               goto out_no_dmabuf;
+
+       res->backup = backup;
+
+out_no_dmabuf:
+       return ret;
+}
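
The size expression rounds res->backup_size up to a whole number of pages; since PAGE_MASK is ~(PAGE_SIZE - 1) in the kernel, it is equivalent to PAGE_ALIGN(res->backup_size). A worked example, assuming 4 KiB pages:

    /*
     * With PAGE_SIZE = 4096 and PAGE_MASK = ~4095:
     *   backup_size = 5000
     *   (5000 + 4095) & ~4095 = 9095 & ~4095 = 8192   (two pages)
     * i.e. the same value PAGE_ALIGN(5000) would produce.
     */
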
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ *                            to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ * @val_buf:        Information about a buffer possibly
+ *                  containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+                                   struct ttm_validate_buffer *val_buf)
+{
+       int ret = 0;
+       const struct vmw_res_func *func = res->func;
+
+       if (unlikely(res->id == -1)) {
+               ret = func->create(res);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       if (func->bind &&
+           ((func->needs_backup && list_empty(&res->mob_head) &&
+             val_buf->bo != NULL) ||
+            (!func->needs_backup && val_buf->bo != NULL))) {
+               ret = func->bind(res, val_buf);
+               if (unlikely(ret != 0))
+                       goto out_bind_failed;
+               if (func->needs_backup)
+                       list_add_tail(&res->mob_head, &res->backup->res_list);
+       }
+
+       /*
+        * Only do this on write operations, and move to
+        * vmw_resource_unreserve if it can be called after
+        * backup buffers have been unreserved. Otherwise
+        * sort out locking.
+        */
+       res->res_dirty = true;
+
+       return 0;
+
+out_bind_failed:
+       func->destroy(res);
+
+       return ret;
+}
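
The bind condition above is easier to read factored out: a bind is issued only when a buffer was supplied, and either the resource type does not track backup state or the backup is not yet bound (mob_head empty). An equivalent restatement:

    /*
     * Logically equivalent form of the bind condition in
     * vmw_resource_do_validate() (illustrative restatement only):
     *
     *   val_buf->bo != NULL &&
     *   (!func->needs_backup || list_empty(&res->mob_head))
     */
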
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res:               Pointer to the struct vmw_resource to unreserve.
+ * @new_backup:        Pointer to new backup buffer if command submission
+ *                     switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+                           struct vmw_dma_buffer *new_backup,
+                           unsigned long new_backup_offset)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       if (!list_empty(&res->lru_head))
+               return;
+
+       if (new_backup && new_backup != res->backup) {
+
+               if (res->backup) {
+                       BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
+                       list_del_init(&res->mob_head);
+                       vmw_dmabuf_unreference(&res->backup);
+               }
+
+               res->backup = vmw_dmabuf_reference(new_backup);
+               BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
+               list_add_tail(&res->mob_head, &new_backup->res_list);
+       }
+       if (new_backup)
+               res->backup_offset = new_backup_offset;
+
+       if (!res->func->may_evict)
+               return;
+
+       write_lock(&dev_priv->resource_lock);
+       list_add_tail(&res->lru_head,
+                     &res->dev_priv->res_lru[res->func->res_type]);
+       write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ *                             for a resource and in that case, allocate
+ *                             one, reserve and validate it.
+ *
+ * @res:            The resource for which to allocate a backup buffer.
+ * @interruptible:  Whether any sleeps during allocation should be
+ *                  performed while interruptible.
+ * @val_buf:        On successful return contains data about the
+ *                  reserved and validated backup buffer.
+ */
+int vmw_resource_check_buffer(struct vmw_resource *res,
+                             bool interruptible,
+                             struct ttm_validate_buffer *val_buf)
+{
+       struct list_head val_list;
+       bool backup_dirty = false;
+       int ret;
+
+       if (unlikely(res->backup == NULL)) {
+               ret = vmw_resource_buf_alloc(res, interruptible);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       INIT_LIST_HEAD(&val_list);
+       val_buf->bo = ttm_bo_reference(&res->backup->base);
+       list_add_tail(&val_buf->head, &val_list);
+       ret = ttm_eu_reserve_buffers(&val_list);
+       if (unlikely(ret != 0))
+               goto out_no_reserve;
+
+       if (res->func->needs_backup && list_empty(&res->mob_head))
+               return 0;
+
+       backup_dirty = res->backup_dirty;
+       ret = ttm_bo_validate(&res->backup->base,
+                             res->func->backup_placement,
+                             true, false);
+
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+
+       return 0;
+
+out_no_validate:
+       ttm_eu_backoff_reservation(&val_list);
+out_no_reserve:
+       ttm_bo_unref(&val_buf->bo);
+       if (backup_dirty)
+               vmw_dmabuf_unreference(&res->backup);
+
+       return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res:            The resource to reserve.
+ * @no_backup:      If true, do not allocate a backup buffer even for
+ *                  resource types that normally need one.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       int ret;
+
+       write_lock(&dev_priv->resource_lock);
+       list_del_init(&res->lru_head);
+       write_unlock(&dev_priv->resource_lock);
+
+       if (res->func->needs_backup && res->backup == NULL &&
+           !no_backup) {
+               ret = vmw_resource_buf_alloc(res, true);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       return 0;
+}
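
Taken together, reserve, validate and unreserve form the per-resource lifecycle this patch introduces. A hedged sketch of a caller, with backup-buffer reservation, command submission and fencing (normally handled by the execbuf machinery) omitted:

    /*
     * Lifecycle sketch using only functions from this patch; it
     * shows the resource side only. vmw_resource_validate() is
     * defined further down in this hunk.
     */
    int ret;

    ret = vmw_resource_reserve(res, false);
    if (ret == 0) {
            ret = vmw_resource_validate(res);
            vmw_resource_unreserve(res, NULL, 0);
    }
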
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ *                                    backup buffer
+ *
+ * @val_buf:        Backup buffer information.
+ */
+void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+       struct list_head val_list;
+
+       if (likely(val_buf->bo == NULL))
+               return;
+
+       INIT_LIST_HEAD(&val_list);
+       list_add_tail(&val_buf->head, &val_list);
+       ttm_eu_backoff_reservation(&val_list);
+       ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ *                         to a backup buffer.
+ *
+ * @res:            The resource to evict.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res)
+{
+       struct ttm_validate_buffer val_buf;
+       const struct vmw_res_func *func = res->func;
+       int ret;
+
+       BUG_ON(!func->may_evict);
+
+       val_buf.bo = NULL;
+       ret = vmw_resource_check_buffer(res, true, &val_buf);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (unlikely(func->unbind != NULL &&
+                    (!func->needs_backup || !list_empty(&res->mob_head)))) {
+               ret = func->unbind(res, res->res_dirty, &val_buf);
+               if (unlikely(ret != 0))
+                       goto out_no_unbind;
+               list_del_init(&res->mob_head);
+       }
+       ret = func->destroy(res);
+       res->backup_dirty = true;
+       res->res_dirty = false;
+out_no_unbind:
+       vmw_resource_backoff_reservation(&val_buf);
+
+       return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ *                         to the device.
+ *
+ * @res:            The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+       int ret;
+       struct vmw_resource *evict_res;
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+       struct ttm_validate_buffer val_buf;
+
+       if (likely(!res->func->may_evict))
+               return 0;
+
+       val_buf.bo = NULL;
+       if (res->backup)
+               val_buf.bo = &res->backup->base;
+       do {
+               ret = vmw_resource_do_validate(res, &val_buf);
+               if (likely(ret != -EBUSY))
+                       break;
+
+               write_lock(&dev_priv->resource_lock);
+               if (list_empty(lru_list) || !res->func->may_evict) {
+                       DRM_ERROR("Out of device device id entries "
+                                 "for %s.\n", res->func->type_name);
+                       ret = -EBUSY;
+                       write_unlock(&dev_priv->resource_lock);
+                       break;
+               }
+
+               evict_res = vmw_resource_reference
+                       (list_first_entry(lru_list, struct vmw_resource,
+                                         lru_head));
+               list_del_init(&evict_res->lru_head);
+
+               write_unlock(&dev_priv->resource_lock);
+               vmw_resource_do_evict(evict_res);
+               vmw_resource_unreference(&evict_res);
+       } while (1);
+
+       if (unlikely(ret != 0))
+               goto out_no_validate;
+       else if (!res->func->needs_backup && res->backup) {
+               list_del_init(&res->mob_head);
+               vmw_dmabuf_unreference(&res->backup);
+       }
+
+       return 0;
+
+out_no_validate:
+       return ret;
+}
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ *                       object without unreserving it.
+ *
+ * @bo:             Pointer to the struct ttm_buffer_object to fence.
+ * @fence:          Pointer to the fence. If NULL, this function will
+ *                  insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+                        struct vmw_fence_obj *fence)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_bo_driver *driver = bdev->driver;
+       struct vmw_fence_obj *old_fence_obj;
+       struct vmw_private *dev_priv =
+               container_of(bdev, struct vmw_private, bdev);
+
+       if (fence == NULL)
+               vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+       else
+               driver->sync_obj_ref(fence);
+
+       spin_lock(&bdev->fence_lock);
+
+       old_fence_obj = bo->sync_obj;
+       bo->sync_obj = fence;
+
+       spin_unlock(&bdev->fence_lock);
+
+       if (old_fence_obj)
+               vmw_fence_obj_unreference(&old_fence_obj);
+}
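
Because the function deliberately leaves the reservation in place, callers pair it with a separate ttm_bo_unreserve(). Sketch:

    /*
     * Fence a reserved buffer with a newly inserted command-stream
     * fence (fence == NULL), then drop the reservation separately.
     */
    vmw_fence_single_bo(bo, NULL);
    ttm_bo_unreserve(bo);
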
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo:             The TTM buffer object about to move.
+ * @mem:            The struct ttm_mem_reg indicating the memory region
+ *                  to which the buffer is being moved.
+ *
+ * For now does nothing.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+                             struct ttm_mem_reg *mem)
+{
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res:            The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+       return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv:       Pointer to a device private struct
+ * @type:           The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+                                   enum vmw_res_type type)
+{
+       struct list_head *lru_list = &dev_priv->res_lru[type];
+       struct vmw_resource *evict_res;
+
+       do {
+               write_lock(&dev_priv->resource_lock);
+
+               if (list_empty(lru_list))
+                       goto out_unlock;
+
+               evict_res = vmw_resource_reference(
+                       list_first_entry(lru_list, struct vmw_resource,
+                                        lru_head));
+               list_del_init(&evict_res->lru_head);
+               write_unlock(&dev_priv->resource_lock);
+               vmw_resource_do_evict(evict_res);
+               vmw_resource_unreference(&evict_res);
+       } while (1);
+
+out_unlock:
+       write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv:       Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+       enum vmw_res_type type;
+
+       mutex_lock(&dev_priv->cmdbuf_mutex);
+
+       for (type = 0; type < vmw_res_max; ++type)
+               vmw_resource_evict_type(dev_priv, type);
+
+       mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644 (file)
index 0000000..f3adeed
--- /dev/null
@@ -0,0 +1,84 @@
+/**************************************************************************
+ *
+ * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_RESOURCE_PRIV_H_
+#define _VMWGFX_RESOURCE_PRIV_H_
+
+#include "vmwgfx_drv.h"
+
+/**
+ * struct vmw_user_resource_conv - Identify a derived user-exported resource
+ * type and provide a function to convert its ttm_base_object pointer to
+ * a struct vmw_resource
+ */
+struct vmw_user_resource_conv {
+       enum ttm_object_type object_type;
+       struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
+       void (*res_free) (struct vmw_resource *res);
+};
+
+/**
+ * struct vmw_res_func - members and functions common for a resource type
+ *
+ * @res_type:          Enum that identifies the lru list to use for eviction.
+ * @needs_backup:      Whether the resource is guest-backed and needs
+ *                     persistent buffer storage.
+ * @type_name:         String that identifies the resource type.
+ * @backup_placement:  TTM placement for backup buffers.
+ * @may_evict:         Whether the resource may be evicted.
+ * @create:            Create a hardware resource.
+ * @destroy:           Destroy a hardware resource.
+ * @bind:              Bind a hardware resource to persistent buffer storage.
+ * @unbind:            Unbind a hardware resource from persistent
+ *                     buffer storage.
+ */
+
+struct vmw_res_func {
+       enum vmw_res_type res_type;
+       bool needs_backup;
+       const char *type_name;
+       struct ttm_placement *backup_placement;
+       bool may_evict;
+
+       int (*create) (struct vmw_resource *res);
+       int (*destroy) (struct vmw_resource *res);
+       int (*bind) (struct vmw_resource *res,
+                    struct ttm_validate_buffer *val_buf);
+       int (*unbind) (struct vmw_resource *res,
+                      bool readback,
+                      struct ttm_validate_buffer *val_buf);
+};
+
+int vmw_resource_alloc_id(struct vmw_resource *res);
+void vmw_resource_release_id(struct vmw_resource *res);
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+                     bool delay_id,
+                     void (*res_free) (struct vmw_resource *res),
+                     const struct vmw_res_func *func);
+void vmw_resource_activate(struct vmw_resource *res,
+                          void (*hw_destroy) (struct vmw_resource *));
+#endif
index 6deaf2f..26387c3 100644 (file)
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
-       drm_connector_attach_property(connector,
+       drm_object_attach_property(&connector->base,
                                      dev->mode_config.dirty_info_property,
                                      1);
 
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
                return -EINVAL;
        }
 
-       if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+       if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
                DRM_INFO("Not using screen objects,"
                         " missing cap SCREEN_OBJECT_2\n");
                return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644 (file)
index 0000000..5828143
--- /dev/null
@@ -0,0 +1,893 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include <ttm/ttm_placement.h>
+#include "svga3d_surfacedefs.h"
+
+/**
+ * struct vmw_user_surface - User-space visible surface resource
+ *
+ * @base:           The TTM base object handling user-space visibility.
+ * @srf:            The surface metadata.
+ * @size:           TTM accounting size for the surface.
+ */
+struct vmw_user_surface {
+       struct ttm_base_object base;
+       struct vmw_surface srf;
+       uint32_t size;
+       uint32_t backup_handle;
+};
+
+/**
+ * struct vmw_surface_offset - Backing store mip level offset info
+ *
+ * @face:           Surface face.
+ * @mip:            Mip level.
+ * @bo_offset:      Offset into backing store of this mip level.
+ *
+ */
+struct vmw_surface_offset {
+       uint32_t face;
+       uint32_t mip;
+       uint32_t bo_offset;
+};
+
+static void vmw_user_surface_free(struct vmw_resource *res);
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base);
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf);
+static int vmw_legacy_srf_create(struct vmw_resource *res);
+static int vmw_legacy_srf_destroy(struct vmw_resource *res);
+
+static const struct vmw_user_resource_conv user_surface_conv = {
+       .object_type = VMW_RES_SURFACE,
+       .base_obj_to_res = vmw_user_surface_base_to_res,
+       .res_free = vmw_user_surface_free
+};
+
+const struct vmw_user_resource_conv *user_surface_converter =
+       &user_surface_conv;
+
+
+static uint64_t vmw_user_surface_size;
+
+static const struct vmw_res_func vmw_legacy_surface_func = {
+       .res_type = vmw_res_surface,
+       .needs_backup = false,
+       .may_evict = true,
+       .type_name = "legacy surfaces",
+       .backup_placement = &vmw_srf_placement,
+       .create = &vmw_legacy_srf_create,
+       .destroy = &vmw_legacy_srf_destroy,
+       .bind = &vmw_legacy_srf_bind,
+       .unbind = &vmw_legacy_srf_unbind
+};
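+
+/*
+ * The generic resource manager drives this table: create() is invoked as
+ * part of resource validation, bind() and unbind() transfer contents
+ * between the device and the backup buffer, and destroy() is invoked as
+ * part of resource eviction.
+ */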
+
+/**
+ * struct vmw_surface_dma - SVGA3D DMA command
+ */
+struct vmw_surface_dma {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdSurfaceDMA body;
+       SVGA3dCopyBox cb;
+       SVGA3dCmdSurfaceDMASuffix suffix;
+};
+
+/**
+ * struct vmw_surface_define - SVGA3D Surface Define command
+ */
+struct vmw_surface_define {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdDefineSurface body;
+};
+
+/**
+ * struct vmw_surface_destroy - SVGA3D Surface Destroy command
+ */
+struct vmw_surface_destroy {
+       SVGA3dCmdHeader header;
+       SVGA3dCmdDestroySurface body;
+};
+
+
+/**
+ * vmw_surface_dma_size - Compute fifo size for a dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface dma command for backup or
+ * restoration of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
+{
+       return srf->num_sizes * sizeof(struct vmw_surface_dma);
+}
+
+
+/**
+ * vmw_surface_define_size - Compute fifo size for a surface define command.
+ *
+ * @srf: Pointer to a struct vmw_surface
+ *
+ * Computes the required size for a surface define command for the definition
+ * of the surface represented by @srf.
+ */
+static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
+{
+       return sizeof(struct vmw_surface_define) + srf->num_sizes *
+               sizeof(SVGA3dSize);
+}
+
+
+/**
+ * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
+ *
+ * Computes the required size for a surface destroy command for the destruction
+ * of a hw surface.
+ */
+static inline uint32_t vmw_surface_destroy_size(void)
+{
+       return sizeof(struct vmw_surface_destroy);
+}
+
+/**
+ * vmw_surface_destroy_encode - Encode a surface_destroy command.
+ *
+ * @id: The surface id
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_destroy_encode(uint32_t id,
+                                      void *cmd_space)
+{
+       struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
+               cmd_space;
+
+       cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
+       cmd->header.size = sizeof(cmd->body);
+       cmd->body.sid = id;
+}
+
+/**
+ * vmw_surface_define_encode - Encode a surface_define command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ */
+static void vmw_surface_define_encode(const struct vmw_surface *srf,
+                                     void *cmd_space)
+{
+       struct vmw_surface_define *cmd = (struct vmw_surface_define *)
+               cmd_space;
+       struct drm_vmw_size *src_size;
+       SVGA3dSize *cmd_size;
+       uint32_t cmd_len;
+       int i;
+
+       cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
+
+       cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
+       cmd->header.size = cmd_len;
+       cmd->body.sid = srf->res.id;
+       cmd->body.surfaceFlags = srf->flags;
+       cmd->body.format = cpu_to_le32(srf->format);
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+               cmd->body.face[i].numMipLevels = srf->mip_levels[i];
+
+       cmd += 1;
+       cmd_size = (SVGA3dSize *) cmd;
+       src_size = srf->sizes;
+
+       for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
+               cmd_size->width = src_size->width;
+               cmd_size->height = src_size->height;
+               cmd_size->depth = src_size->depth;
+       }
+}
+
+/**
+ * vmw_surface_dma_encode - Encode a surface_dma command.
+ *
+ * @srf: Pointer to a struct vmw_surface object.
+ * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
+ * should be placed or read from.
+ * @to_surface: Boolean whether to DMA to the surface or from the surface.
+ */
+static void vmw_surface_dma_encode(struct vmw_surface *srf,
+                                  void *cmd_space,
+                                  const SVGAGuestPtr *ptr,
+                                  bool to_surface)
+{
+       uint32_t i;
+       struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
+       const struct svga3d_surface_desc *desc =
+               svga3dsurface_get_desc(srf->format);
+
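+       /*
+        * Emit one SVGA3D_CMD_SURFACE_DMA command per mip image; each
+        * command carries a single copy box covering the full mip size.
+        */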
+       for (i = 0; i < srf->num_sizes; ++i) {
+               SVGA3dCmdHeader *header = &cmd->header;
+               SVGA3dCmdSurfaceDMA *body = &cmd->body;
+               SVGA3dCopyBox *cb = &cmd->cb;
+               SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
+               const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
+               const struct drm_vmw_size *cur_size = &srf->sizes[i];
+
+               header->id = SVGA_3D_CMD_SURFACE_DMA;
+               header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
+
+               body->guest.ptr = *ptr;
+               body->guest.ptr.offset += cur_offset->bo_offset;
+               body->guest.pitch = svga3dsurface_calculate_pitch(desc,
+                                                                 cur_size);
+               body->host.sid = srf->res.id;
+               body->host.face = cur_offset->face;
+               body->host.mipmap = cur_offset->mip;
+               body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
+                                 SVGA3D_READ_HOST_VRAM);
+               cb->x = 0;
+               cb->y = 0;
+               cb->z = 0;
+               cb->srcx = 0;
+               cb->srcy = 0;
+               cb->srcz = 0;
+               cb->w = cur_size->width;
+               cb->h = cur_size->height;
+               cb->d = cur_size->depth;
+
+               suffix->suffixSize = sizeof(*suffix);
+               suffix->maximumOffset =
+                       svga3dsurface_get_image_buffer_size(desc, cur_size,
+                                                           body->guest.pitch);
+               suffix->flags.discard = 0;
+               suffix->flags.unsynchronized = 0;
+               suffix->flags.reserved = 0;
+               ++cmd;
+       }
+}
+
+
+/**
+ * vmw_hw_surface_destroy - destroy a Device surface
+ *
+ * @res:        Pointer to a struct vmw_resource embedded in a struct
+ *              vmw_surface.
+ *
+ * Destroys the device surface associated with a struct vmw_surface, if
+ * any, and adjusts accounting and the resource count accordingly.
+ */
+static void vmw_hw_surface_destroy(struct vmw_resource *res)
+{
+
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf;
+       void *cmd;
+
+       if (res->id != -1) {
+
+               cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
+               if (unlikely(cmd == NULL)) {
+                       DRM_ERROR("Failed reserving FIFO space for surface "
+                                 "destruction.\n");
+                       return;
+               }
+
+               vmw_surface_destroy_encode(res->id, cmd);
+               vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
+
+               /*
+                * TODO: Convert to used_memory_size_atomic, or use a
+                * separate lock, to avoid taking dev_priv::cmdbuf_mutex
+                * in the destroy path.
+                */
+
+               mutex_lock(&dev_priv->cmdbuf_mutex);
+               srf = vmw_res_to_srf(res);
+               dev_priv->used_memory_size -= res->backup_size;
+               mutex_unlock(&dev_priv->cmdbuf_mutex);
+       }
+       vmw_3d_resource_dec(dev_priv, false);
+}
+
+/**
+ * vmw_legacy_srf_create - Create a device surface as part of the
+ * resource validation process.
+ *
+ * @res: Pointer to a struct vmw_surface.
+ *
+ * If the surface doesn't have a hw id yet, allocate one and issue the
+ * surface define command to the device.
+ *
+ * Returns -EBUSY if there wasn't sufficient device resources to
+ * complete the validation. Retry after freeing up resources.
+ *
+ * May return other errors if the kernel is out of guest resources.
+ */
+static int vmw_legacy_srf_create(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       struct vmw_surface *srf;
+       uint32_t submit_size;
+       uint8_t *cmd;
+       int ret;
+
+       if (likely(res->id != -1))
+               return 0;
+
+       srf = vmw_res_to_srf(res);
+       if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+                    dev_priv->memory_size))
+               return -EBUSY;
+
+       /*
+        * Alloc id for the resource.
+        */
+
+       ret = vmw_resource_alloc_id(res);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate a surface id.\n");
+               goto out_no_id;
+       }
+
+       if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
+               ret = -EBUSY;
+               goto out_no_fifo;
+       }
+
+       /*
+        * Encode the surface define command.
+        */
+
+       submit_size = vmw_surface_define_size(srf);
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "creation.\n");
+               ret = -ENOMEM;
+               goto out_no_fifo;
+       }
+
+       vmw_surface_define_encode(srf, cmd);
+       vmw_fifo_commit(dev_priv, submit_size);
+       /*
+        * Surface memory usage accounting.
+        */
+
+       dev_priv->used_memory_size += res->backup_size;
+       return 0;
+
+out_no_fifo:
+       vmw_resource_release_id(res);
+out_no_id:
+       return ret;
+}
+
+/**
+ * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ * @bind:           Boolean whether to DMA to the surface.
+ *
+ * Transfer backup data to or from a legacy surface as part of the
+ * validation process.
+ * May return other errors if the kernel is out of guest resources.
+ * The backup buffer will be fenced or idle upon successful completion,
+ * and if the surface needs persistent backup storage, the backup buffer
+ * will also be returned reserved iff @bind is true.
+ */
+static int vmw_legacy_srf_dma(struct vmw_resource *res,
+                             struct ttm_validate_buffer *val_buf,
+                             bool bind)
+{
+       SVGAGuestPtr ptr;
+       struct vmw_fence_obj *fence;
+       uint32_t submit_size;
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       uint8_t *cmd;
+       struct vmw_private *dev_priv = res->dev_priv;
+
+       BUG_ON(val_buf->bo == NULL);
+
+       submit_size = vmw_surface_dma_size(srf);
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "DMA.\n");
+               return -ENOMEM;
+       }
+       vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
+       vmw_surface_dma_encode(srf, cmd, &ptr, bind);
+
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Create a fence object and fence the backup buffer.
+        */
+
+       (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+                                         &fence, NULL);
+
+       vmw_fence_single_bo(val_buf->bo, fence);
+
+       if (likely(fence != NULL))
+               vmw_fence_obj_unreference(&fence);
+
+       return 0;
+}
+
+/**
+ * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
+ *                       surface validation process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data to the surface if the
+ * backup buffer is dirty.
+ */
+static int vmw_legacy_srf_bind(struct vmw_resource *res,
+                              struct ttm_validate_buffer *val_buf)
+{
+       if (!res->backup_dirty)
+               return 0;
+
+       return vmw_legacy_srf_dma(res, val_buf, true);
+}
+
+
+/**
+ * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
+ *                         surface eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ * @readback:       Whether to copy the surface contents back to the
+ *                  backup buffer before unbinding.
+ * @val_buf:        Pointer to a struct ttm_validate_buffer containing
+ *                  information about the backup buffer.
+ *
+ * This function will copy backup data from the surface.
+ */
+static int vmw_legacy_srf_unbind(struct vmw_resource *res,
+                                bool readback,
+                                struct ttm_validate_buffer *val_buf)
+{
+       if (unlikely(readback))
+               return vmw_legacy_srf_dma(res, val_buf, false);
+       return 0;
+}
+
+/**
+ * vmw_legacy_srf_destroy - Destroy a device surface as part of a
+ *                          resource eviction process.
+ *
+ * @res:            Pointer to a struct vmw_res embedded in a struct
+ *                  vmw_surface.
+ */
+static int vmw_legacy_srf_destroy(struct vmw_resource *res)
+{
+       struct vmw_private *dev_priv = res->dev_priv;
+       uint32_t submit_size;
+       uint8_t *cmd;
+
+       BUG_ON(res->id == -1);
+
+       /*
+        * Encode the surface destroy command.
+        */
+
+       submit_size = vmw_surface_destroy_size();
+       cmd = vmw_fifo_reserve(dev_priv, submit_size);
+       if (unlikely(cmd == NULL)) {
+               DRM_ERROR("Failed reserving FIFO space for surface "
+                         "eviction.\n");
+               return -ENOMEM;
+       }
+
+       vmw_surface_destroy_encode(res->id, cmd);
+       vmw_fifo_commit(dev_priv, submit_size);
+
+       /*
+        * Surface memory usage accounting.
+        */
+
+       dev_priv->used_memory_size -= res->backup_size;
+
+       /*
+        * Release the surface ID.
+        */
+
+       vmw_resource_release_id(res);
+
+       return 0;
+}
+
+
+/**
+ * vmw_surface_init - initialize a struct vmw_surface
+ *
+ * @dev_priv:       Pointer to a device private struct.
+ * @srf:            Pointer to the struct vmw_surface to initialize.
+ * @res_free:       Pointer to a resource destructor used to free
+ *                  the object.
+ */
+static int vmw_surface_init(struct vmw_private *dev_priv,
+                           struct vmw_surface *srf,
+                           void (*res_free) (struct vmw_resource *res))
+{
+       int ret;
+       struct vmw_resource *res = &srf->res;
+
+       BUG_ON(res_free == NULL);
+       (void) vmw_3d_resource_inc(dev_priv, false);
+       ret = vmw_resource_init(dev_priv, res, true, res_free,
+                               &vmw_legacy_surface_func);
+
+       if (unlikely(ret != 0)) {
+               vmw_3d_resource_dec(dev_priv, false);
+               res_free(res);
+               return ret;
+       }
+
+       /*
+        * The surface won't be visible to hardware until a
+        * surface validate.
+        */
+
+       vmw_resource_activate(res, vmw_hw_surface_destroy);
+       return ret;
+}
+
+/**
+ * vmw_user_surface_base_to_res - TTM base object to resource converter for
+ *                                user visible surfaces
+ *
+ * @base:           Pointer to a TTM base object
+ *
+ * Returns the struct vmw_resource embedded in a struct vmw_surface
+ * for the user-visible object identified by the TTM base object @base.
+ */
+static struct vmw_resource *
+vmw_user_surface_base_to_res(struct ttm_base_object *base)
+{
+       return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+}
+
+/**
+ * vmw_user_surface_free - User visible surface resource destructor
+ *
+ * @res:            A struct vmw_resource embedded in a struct vmw_surface.
+ */
+static void vmw_user_surface_free(struct vmw_resource *res)
+{
+       struct vmw_surface *srf = vmw_res_to_srf(res);
+       struct vmw_user_surface *user_srf =
+           container_of(srf, struct vmw_user_surface, srf);
+       struct vmw_private *dev_priv = srf->res.dev_priv;
+       uint32_t size = user_srf->size;
+
+       kfree(srf->offsets);
+       kfree(srf->sizes);
+       kfree(srf->snooper.image);
+       ttm_base_object_kfree(user_srf, base);
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_user_surface_base_release - User visible surface TTM base object
+ * destructor
+ *
+ * @p_base:         Pointer to a pointer to a TTM base object
+ *                  embedded in a struct vmw_user_surface.
+ *
+ * Drops the base object's reference on its resource, and the
+ * pointer pointed to by *p_base is set to NULL.
+ */
+static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct vmw_user_surface *user_srf =
+           container_of(base, struct vmw_user_surface, base);
+       struct vmw_resource *res = &user_srf->srf.res;
+
+       *p_base = NULL;
+       vmw_resource_unreference(&res);
+}
+
+/**
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
+ *                             the user surface destroy functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+{
+       struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_surface_define_ioctl - Ioctl function implementing
+ *                            the user surface define functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_user_surface *user_srf;
+       struct vmw_surface *srf;
+       struct vmw_resource *res;
+       struct vmw_resource *tmp;
+       union drm_vmw_surface_create_arg *arg =
+           (union drm_vmw_surface_create_arg *)data;
+       struct drm_vmw_surface_create_req *req = &arg->req;
+       struct drm_vmw_surface_arg *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct drm_vmw_size __user *user_sizes;
+       int ret;
+       int i, j;
+       uint32_t cur_bo_offset;
+       struct drm_vmw_size *cur_size;
+       struct vmw_surface_offset *cur_offset;
+       uint32_t num_sizes;
+       uint32_t size;
+       struct vmw_master *vmaster = vmw_master(file_priv->master);
+       const struct svga3d_surface_desc *desc;
+
+       if (unlikely(vmw_user_surface_size == 0))
+               vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+                       128;
+
+       num_sizes = 0;
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+               num_sizes += req->mip_levels[i];
+
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+           DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
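+       /*
+        * Note: the extra 128 bytes on top of vmw_user_surface_size (which
+        * itself already includes 128 bytes of slack) appear to be headroom
+        * for per-object bookkeeping; the accounting presumably only needs
+        * to be a safe upper bound.
+        */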
+       size = vmw_user_surface_size + 128 +
+               ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+               ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+       desc = svga3dsurface_get_desc(req->format);
+       if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
+               DRM_ERROR("Invalid surface format for surface creation.\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_read_lock(&vmaster->lock, true);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+                                  size, false, true);
+       if (unlikely(ret != 0)) {
+               if (ret != -ERESTARTSYS)
+                       DRM_ERROR("Out of graphics memory for surface"
+                                 " creation.\n");
+               goto out_unlock;
+       }
+
+       user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
+       if (unlikely(user_srf == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_user_srf;
+       }
+
+       srf = &user_srf->srf;
+       res = &srf->res;
+
+       srf->flags = req->flags;
+       srf->format = req->format;
+       srf->scanout = req->scanout;
+
+       memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
+       srf->num_sizes = num_sizes;
+       user_srf->size = size;
+
+       srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
+       if (unlikely(srf->sizes == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_sizes;
+       }
+       srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
+                              GFP_KERNEL);
+       if (unlikely(srf->offsets == NULL)) {
+               ret = -ENOMEM;
+               goto out_no_offsets;
+       }
+
+       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+           req->size_addr;
+
+       ret = copy_from_user(srf->sizes, user_sizes,
+                            srf->num_sizes * sizeof(*srf->sizes));
+       if (unlikely(ret != 0)) {
+               ret = -EFAULT;
+               goto out_no_copy;
+       }
+
+       srf->base_size = *srf->sizes;
+       srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
+       srf->multisample_count = 1;
+
+       cur_bo_offset = 0;
+       cur_offset = srf->offsets;
+       cur_size = srf->sizes;
+
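+       /*
+        * Lay out the backing store face-major, mip-minor, with images
+        * packed back to back; the running bo_offset therefore ends up
+        * as the total backup buffer size.
+        */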
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+               for (j = 0; j < srf->mip_levels[i]; ++j) {
+                       uint32_t stride = svga3dsurface_calculate_pitch
+                               (desc, cur_size);
+
+                       cur_offset->face = i;
+                       cur_offset->mip = j;
+                       cur_offset->bo_offset = cur_bo_offset;
+                       cur_bo_offset += svga3dsurface_get_image_buffer_size
+                               (desc, cur_size, stride);
+                       ++cur_offset;
+                       ++cur_size;
+               }
+       }
+       res->backup_size = cur_bo_offset;
+       if (srf->scanout &&
+           srf->num_sizes == 1 &&
+           srf->sizes[0].width == 64 &&
+           srf->sizes[0].height == 64 &&
+           srf->format == SVGA3D_A8R8G8B8) {
+
+               /* allocate and clear the cursor snooper image */
+               srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
+               if (!srf->snooper.image) {
+                       DRM_ERROR("Failed to allocate cursor_image\n");
+                       ret = -ENOMEM;
+                       goto out_no_copy;
+               }
+       } else {
+               srf->snooper.image = NULL;
+       }
+       srf->snooper.crtc = NULL;
+
+       user_srf->base.shareable = false;
+       user_srf->base.tfile = NULL;
+
+       /*
+        * From this point, the generic resource management functions
+        * destroy the object on failure.
+        */
+
+       ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+       if (unlikely(ret != 0))
+               goto out_unlock;
+
+       tmp = vmw_resource_reference(&srf->res);
+       ret = ttm_base_object_init(tfile, &user_srf->base,
+                                  req->shareable, VMW_RES_SURFACE,
+                                  &vmw_user_surface_base_release, NULL);
+
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&tmp);
+               vmw_resource_unreference(&res);
+               goto out_unlock;
+       }
+
+       rep->sid = user_srf->base.hash.key;
+       vmw_resource_unreference(&res);
+
+       ttm_read_unlock(&vmaster->lock);
+       return 0;
+out_no_copy:
+       kfree(srf->offsets);
+out_no_offsets:
+       kfree(srf->sizes);
+out_no_sizes:
+       ttm_base_object_kfree(user_srf, base);
+out_no_user_srf:
+       ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+       ttm_read_unlock(&vmaster->lock);
+       return ret;
+}
+
+/**
+ * vmw_surface_reference_ioctl - Ioctl function implementing
+ *                               the user surface reference functionality.
+ *
+ * @dev:            Pointer to a struct drm_device.
+ * @data:           Pointer to data copied from / to user-space.
+ * @file_priv:      Pointer to a drm file private structure.
+ */
+int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_priv)
+{
+       union drm_vmw_surface_reference_arg *arg =
+           (union drm_vmw_surface_reference_arg *)data;
+       struct drm_vmw_surface_arg *req = &arg->req;
+       struct drm_vmw_surface_create_req *rep = &arg->rep;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_surface *srf;
+       struct vmw_user_surface *user_srf;
+       struct drm_vmw_size __user *user_sizes;
+       struct ttm_base_object *base;
+       int ret = -EINVAL;
+
+       base = ttm_base_object_lookup(tfile, req->sid);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Could not find surface to reference.\n");
+               return -EINVAL;
+       }
+
+       if (unlikely(base->object_type != VMW_RES_SURFACE))
+               goto out_bad_resource;
+
+       user_srf = container_of(base, struct vmw_user_surface, base);
+       srf = &user_srf->srf;
+
+       ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Could not add a reference to a surface.\n");
+               goto out_no_reference;
+       }
+
+       rep->flags = srf->flags;
+       rep->format = srf->format;
+       memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
+       user_sizes = (struct drm_vmw_size __user *)(unsigned long)
+           rep->size_addr;
+
+       if (user_sizes)
+               ret = copy_to_user(user_sizes, srf->sizes,
+                                  srf->num_sizes * sizeof(*srf->sizes));
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("copy_to_user failed %p %u\n",
+                         user_sizes, srf->num_sizes);
+               ret = -EFAULT;
+       }
+out_bad_resource:
+out_no_reference:
+       ttm_base_object_unref(&base);
+
+       return ret;
+}
index e25cf31..fa60add 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/dmi.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
                             size_t cnt, loff_t *ppos)
 {
        char usercmd[64];
-       const char *pdev_name;
        int ret;
        bool delay = false, can_switch;
        bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
                goto out;
 
        if (can_switch) {
-               pdev_name = pci_name(client->pdev);
                ret = vga_switchto_stage1(client);
                if (ret)
                        printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
 int vga_switcheroo_process_delayed_switch(void)
 {
        struct vga_switcheroo_client *client;
-       const char *pdev_name;
        int ret;
        int err = -EINVAL;
 
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
        if (!client || !check_can_switch())
                goto err;
 
-       pdev_name = pci_name(client->pdev);
        ret = vga_switchto_stage2(client);
        if (ret)
                printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
        return err;
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
index e9df461..c7bff51 100644 (file)
@@ -818,6 +818,16 @@ config I2C_TINY_USB
          This driver can also be built as a module.  If so, the module
          will be called i2c-tiny-usb.
 
+config I2C_VIPERBOARD
+       tristate "Viperboard I2C master support"
+       depends on MFD_VIPERBOARD && USB
+       help
+         Say yes here to access the I2C part of the Nano River
+         Technologies Viperboard as I2C master.
+         See the viperboard API specification and Nano River Tech's
+         viperboard.h for the detailed meaning of the module parameters.
+
 comment "Other I2C/SMBus bus drivers"
 
 config I2C_ACORN
index 395b516..e5cb209 100644 (file)
@@ -79,6 +79,7 @@ obj-$(CONFIG_I2C_PARPORT)     += i2c-parport.o
 obj-$(CONFIG_I2C_PARPORT_LIGHT)        += i2c-parport-light.o
 obj-$(CONFIG_I2C_TAOS_EVM)     += i2c-taos-evm.o
 obj-$(CONFIG_I2C_TINY_USB)     += i2c-tiny-usb.o
+obj-$(CONFIG_I2C_VIPERBOARD)   += i2c-viperboard.o
 
 # Other I2C/SMBus bus drivers
 obj-$(CONFIG_I2C_ACORN)                += i2c-acorn.o
index 6abc00d..1e73638 100644 (file)
@@ -81,6 +81,7 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/of_i2c.h>
 
 #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
                defined CONFIG_DMI
@@ -1108,6 +1109,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
                /* fall through */
        default:
                priv->features |= FEATURE_I2C_BLOCK_READ;
+               priv->features |= FEATURE_IRQ;
                /* fall through */
        case PCI_DEVICE_ID_INTEL_82801DB_3:
                priv->features |= FEATURE_SMBUS_PEC;
@@ -1120,16 +1122,6 @@ static int __devinit i801_probe(struct pci_dev *dev,
                break;
        }
 
-       /* IRQ processing tested on CougarPoint PCH, ICH5, ICH7-M and ICH10 */
-       if (dev->device == PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS ||
-           dev->device == PCI_DEVICE_ID_INTEL_82801EB_3 ||
-           dev->device == PCI_DEVICE_ID_INTEL_ICH7_17 ||
-           dev->device == PCI_DEVICE_ID_INTEL_ICH8_5 ||
-           dev->device == PCI_DEVICE_ID_INTEL_ICH9_6 ||
-           dev->device == PCI_DEVICE_ID_INTEL_ICH10_4 ||
-           dev->device == PCI_DEVICE_ID_INTEL_ICH10_5)
-               priv->features |= FEATURE_IRQ;
-
        /* Disable features on user request */
        for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
                if (priv->features & disable_features & (1 << i))
@@ -1215,6 +1207,7 @@ static int __devinit i801_probe(struct pci_dev *dev,
                goto exit_free_irq;
        }
 
+       of_i2c_register_devices(&priv->adapter);
        i801_probe_optional_slaves(priv);
        /* We ignore errors - multiplexing is optional */
        i801_add_mux(priv);
index 8bbd6ec..f7216ed 100644 (file)
@@ -204,9 +204,8 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
                         */
                        pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
                                              temp | 1);
-                       dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
-                               "WARNING: SMBus interface has been "
-                               "FORCEFULLY ENABLED!\n");
+                       dev_notice(&PIIX4_dev->dev,
+                                  "WARNING: SMBus interface has been FORCEFULLY ENABLED!\n");
                } else {
                        dev_err(&PIIX4_dev->dev,
                                "Host SMBus controller not enabled!\n");
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
new file mode 100644 (file)
index 0000000..f5fa20d
--- /dev/null
@@ -0,0 +1,480 @@
+/*
+ *  Nano River Technologies viperboard i2c master driver
+ *
+ *  (C) 2012 by Lemonage GmbH
+ *  Author: Lars Poeschel <poeschel@lemonage.de>
+ *  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include <linux/mfd/viperboard.h>
+
+struct vprbrd_i2c {
+       struct i2c_adapter i2c;
+       u8 bus_freq_param;
+};
+
+/* i2c bus frequency module parameter */
+static u8 i2c_bus_param;
+static unsigned int i2c_bus_freq = 100;
+module_param(i2c_bus_freq, uint, 0);
+MODULE_PARM_DESC(i2c_bus_freq,
+       "i2c bus frequency in kHz (default 100); valid values: 10, 100, 200, 400, 1000, 3000, 6000");
+
+static int vprbrd_i2c_status(struct i2c_adapter *i2c,
+       struct vprbrd_i2c_status *status, bool prev_error)
+{
+       u16 bytes_xfer;
+       int ret;
+       struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+
+       /* check for protocol error */
+       bytes_xfer = sizeof(struct vprbrd_i2c_status);
+
+       ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
+               VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000,
+               status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS);
+
+       if (ret != bytes_xfer)
+               prev_error = true;
+
+       if (prev_error) {
+               dev_err(&i2c->dev, "failure in usb communication\n");
+               return -EREMOTEIO;
+       }
+
+       dev_dbg(&i2c->dev, "  status = %d\n", status->status);
+       if (status->status != 0x00) {
+               dev_err(&i2c->dev, "failure: i2c protocol error\n");
+               return -EPROTO;
+       }
+       return 0;
+}
+
+static int vprbrd_i2c_receive(struct usb_device *usb_dev,
+       struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer)
+{
+       int ret, bytes_actual;
+       int error = 0;
+
+       /* send the read request */
+       ret = usb_bulk_msg(usb_dev,
+               usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg,
+               sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual,
+               VPRBRD_USB_TIMEOUT_MS);
+
+       if ((ret < 0)
+               || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) {
+               dev_err(&usb_dev->dev, "failure transmitting usb\n");
+               error = -EREMOTEIO;
+       }
+
+       /* read the actual data */
+       ret = usb_bulk_msg(usb_dev,
+               usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg,
+               bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+
+       if ((ret < 0) || (bytes_xfer != bytes_actual)) {
+               dev_err(&usb_dev->dev, "failure receiving usb\n");
+               error = -EREMOTEIO;
+       }
+       return error;
+}
+
+static int vprbrd_i2c_addr(struct usb_device *usb_dev,
+       struct vprbrd_i2c_addr_msg *amsg)
+{
+       int ret, bytes_actual;
+
+       ret = usb_bulk_msg(usb_dev,
+               usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg,
+               sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual,
+               VPRBRD_USB_TIMEOUT_MS);
+
+       if ((ret < 0) ||
+                       (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) {
+               dev_err(&usb_dev->dev, "failure transmitting usb\n");
+               return -EREMOTEIO;
+       }
+       return 0;
+}
+
+static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
+{
+       int ret;
+       u16 remain_len, len1, len2,
+               start = 0x0000;
+       struct vprbrd_i2c_read_msg *rmsg =
+               (struct vprbrd_i2c_read_msg *)vb->buf;
+
+       remain_len = msg->len;
+       rmsg->header.cmd = VPRBRD_I2C_CMD_READ;
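+       /*
+        * The six header length bytes encode up to two bulk sub-transfers
+        * of at most 512 bytes each as saturating byte sums: len0..len2
+        * add up to the first chunk length and len3..len5 to the second,
+        * with each 0xff byte contributing 255. Requests larger than 1024
+        * bytes loop here in 1024-byte chunks.
+        */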
+       while (remain_len > 0) {
+               rmsg->header.addr = cpu_to_le16(start + 0x4000);
+               if (remain_len <= 255) {
+                       len1 = remain_len;
+                       len2 = 0x00;
+                       rmsg->header.len0 = remain_len;
+                       rmsg->header.len1 = 0x00;
+                       rmsg->header.len2 = 0x00;
+                       rmsg->header.len3 = 0x00;
+                       rmsg->header.len4 = 0x00;
+                       rmsg->header.len5 = 0x00;
+                       remain_len = 0;
+               } else if (remain_len <= 510) {
+                       len1 = remain_len;
+                       len2 = 0x00;
+                       rmsg->header.len0 = remain_len - 255;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0x00;
+                       rmsg->header.len3 = 0x00;
+                       rmsg->header.len4 = 0x00;
+                       rmsg->header.len5 = 0x00;
+                       remain_len = 0;
+               } else if (remain_len <= 512) {
+                       len1 = remain_len;
+                       len2 = 0x00;
+                       rmsg->header.len0 = remain_len - 510;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0xff;
+                       rmsg->header.len3 = 0x00;
+                       rmsg->header.len4 = 0x00;
+                       rmsg->header.len5 = 0x00;
+                       remain_len = 0;
+               } else if (remain_len <= 767) {
+                       len1 = 512;
+                       len2 = remain_len - 512;
+                       rmsg->header.len0 = 0x02;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0xff;
+                       rmsg->header.len3 = remain_len - 512;
+                       rmsg->header.len4 = 0x00;
+                       rmsg->header.len5 = 0x00;
+                       remain_len = 0;
+               } else if (remain_len <= 1022) {
+                       len1 = 512;
+                       len2 = remain_len - 512;
+                       rmsg->header.len0 = 0x02;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0xff;
+                       rmsg->header.len3 = remain_len - 767;
+                       rmsg->header.len4 = 0xff;
+                       rmsg->header.len5 = 0x00;
+                       remain_len = 0;
+               } else if (remain_len <= 1024) {
+                       len1 = 512;
+                       len2 = remain_len - 512;
+                       rmsg->header.len0 = 0x02;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0xff;
+                       rmsg->header.len3 = remain_len - 1022;
+                       rmsg->header.len4 = 0xff;
+                       rmsg->header.len5 = 0xff;
+                       remain_len = 0;
+               } else {
+                       len1 = 512;
+                       len2 = 512;
+                       rmsg->header.len0 = 0x02;
+                       rmsg->header.len1 = 0xff;
+                       rmsg->header.len2 = 0xff;
+                       rmsg->header.len3 = 0x02;
+                       rmsg->header.len4 = 0xff;
+                       rmsg->header.len5 = 0xff;
+                       remain_len -= 1024;
+               }
+               rmsg->header.tf1 = cpu_to_le16(len1);
+               rmsg->header.tf2 = cpu_to_le16(len2);
+
+               /* first read transfer */
+               ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1);
+               if (ret < 0)
+                       return ret;
+               /* copy the received data */
+               memcpy(msg->buf + start, rmsg, len1);
+
+               /* second read transfer if necessary */
+               if (len2 > 0) {
+                       ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
+                       if (ret < 0)
+                               return ret;
+                       /* copy the received data */
+                       memcpy(msg->buf + start + 512, rmsg, len2);
+               }
+
+               /* advance to the next 1024-byte chunk, if any */
+               start += len1 + len2;
+       }
+       return 0;
+}
+
+static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg)
+{
+       int ret, bytes_actual;
+       u16 remain_len, bytes_xfer,
+               start = 0x0000;
+       struct vprbrd_i2c_write_msg *wmsg =
+               (struct vprbrd_i2c_write_msg *)vb->buf;
+
+       remain_len = msg->len;
+       wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE;
+       wmsg->header.last = 0x00;
+       wmsg->header.chan = 0x00;
+       wmsg->header.spi = 0x0000;
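+       /*
+        * len1/len2 form a saturating byte sum of the payload length, as
+        * in the read path; at most 503 payload bytes travel per chunk,
+        * presumably so that payload plus write header fit a single
+        * 512-byte bulk packet.
+        */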
+       while (remain_len > 0) {
+               wmsg->header.addr = cpu_to_le16(start + 0x4000);
+               if (remain_len > 503) {
+                       wmsg->header.len1 = 0xff;
+                       wmsg->header.len2 = 0xf8;
+                       remain_len -= 503;
+                       bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr);
+               } else if (remain_len > 255) {
+                       wmsg->header.len1 = 0xff;
+                       wmsg->header.len2 = (remain_len - 255);
+                       bytes_xfer = remain_len +
+                               sizeof(struct vprbrd_i2c_write_hdr);
+                       remain_len = 0;
+               } else {
+                       wmsg->header.len1 = remain_len;
+                       wmsg->header.len2 = 0x00;
+                       bytes_xfer = remain_len +
+                               sizeof(struct vprbrd_i2c_write_hdr);
+                       remain_len = 0;
+               }
+               memcpy(wmsg->data, msg->buf + start,
+                       bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr));
+
+               ret = usb_bulk_msg(vb->usb_dev,
+                       usb_sndbulkpipe(vb->usb_dev,
+                       VPRBRD_EP_OUT), wmsg,
+                       bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS);
+               if ((ret < 0) || (bytes_xfer != bytes_actual))
+                       return -EREMOTEIO;
+
+               /* advance past the payload just sent */
+               start += bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr);
+       }
+       return 0;
+}
+
+static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+               int num)
+{
+       struct i2c_msg *pmsg;
+       int i, ret,
+               error = 0;
+       struct vprbrd *vb = (struct vprbrd *)i2c->algo_data;
+       struct vprbrd_i2c_addr_msg *amsg =
+               (struct vprbrd_i2c_addr_msg *)vb->buf;
+       struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf;
+
+       dev_dbg(&i2c->dev, "master xfer %d messages:\n", num);
+
+       for (i = 0 ; i < num ; i++) {
+               pmsg = &msgs[i];
+
+               dev_dbg(&i2c->dev,
+                       "  %d: %s (flags %d) %d bytes to 0x%02x\n",
+                       i, pmsg->flags & I2C_M_RD ? "read" : "write",
+                       pmsg->flags, pmsg->len, pmsg->addr);
+
+               /* msgs longer than 2048 bytes are not supported by adapter */
+               if (pmsg->len > 2048)
+                       return -EINVAL;
+
+               mutex_lock(&vb->lock);
+               /* directly send the message */
+               if (pmsg->flags & I2C_M_RD) {
+                       /* read data */
+                       amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+                       amsg->unknown2 = 0x00;
+                       amsg->unknown3 = 0x00;
+                       amsg->addr = pmsg->addr;
+                       amsg->unknown1 = 0x01;
+                       amsg->len = cpu_to_le16(pmsg->len);
+                       /* send the addr and the read length to the board */
+                       ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+                       if (ret < 0)
+                               error = ret;
+
+                       ret = vprbrd_i2c_read(vb, pmsg);
+                       if (ret < 0)
+                               error = ret;
+
+                       ret = vprbrd_i2c_status(i2c, smsg, error);
+                       if (ret < 0)
+                               error = ret;
+                       /* in case of protocol error, return the error */
+                       if (error < 0)
+                               goto error;
+               } else {
+                       /* write data */
+                       ret = vprbrd_i2c_write(vb, pmsg);
+
+                       amsg->cmd = VPRBRD_I2C_CMD_ADDR;
+                       amsg->unknown2 = 0x00;
+                       amsg->unknown3 = 0x00;
+                       amsg->addr = pmsg->addr;
+                       amsg->unknown1 = 0x00;
+                       amsg->len = cpu_to_le16(pmsg->len);
+                       /* send the addr; the data goes to the board */
+                       ret = vprbrd_i2c_addr(vb->usb_dev, amsg);
+                       if (ret < 0)
+                               error = ret;
+
+                       ret = vprbrd_i2c_status(i2c, smsg, error);
+                       if (ret < 0)
+                               error = ret;
+
+                       if (error < 0)
+                               goto error;
+               }
+               mutex_unlock(&vb->lock);
+       }
+       return 0;
+error:
+       mutex_unlock(&vb->lock);
+       return error;
+}
+
+static u32 vprbrd_i2c_func(struct i2c_adapter *i2c)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+/* the i2c algorithm exposed by this adapter */
+static const struct i2c_algorithm vprbrd_algorithm = {
+       .master_xfer    = vprbrd_i2c_xfer,
+       .functionality  = vprbrd_i2c_func,
+};
+
+static int __devinit vprbrd_i2c_probe(struct platform_device *pdev)
+{
+       struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+       struct vprbrd_i2c *vb_i2c;
+       int ret;
+       int pipe;
+
+       vb_i2c = kzalloc(sizeof(*vb_i2c), GFP_KERNEL);
+       if (vb_i2c == NULL)
+               return -ENOMEM;
+
+       /* setup i2c adapter description */
+       vb_i2c->i2c.owner = THIS_MODULE;
+       vb_i2c->i2c.class = I2C_CLASS_HWMON;
+       vb_i2c->i2c.algo = &vprbrd_algorithm;
+       vb_i2c->i2c.algo_data = vb;
+       /* copy the param into USB-capable (heap-allocated) memory */
+       vb_i2c->bus_freq_param = i2c_bus_param;
+
+       snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name),
+                "viperboard at bus %03d device %03d",
+                vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+       /* setting the bus frequency */
+       if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ)
+               && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) {
+               pipe = usb_sndctrlpipe(vb->usb_dev, 0);
+               ret = usb_control_msg(vb->usb_dev, pipe,
+                       VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT,
+                       0x0000, 0x0000, &vb_i2c->bus_freq_param, 1,
+                       VPRBRD_USB_TIMEOUT_MS);
+               if (ret != 1) {
+                       dev_err(&pdev->dev,
+                               "failure setting i2c_bus_freq to %d\n",
+                               i2c_bus_freq);
+                       ret = -EIO;
+                       goto error;
+               }
+       } else {
+               dev_err(&pdev->dev,
+                       "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq);
+               ret = -EIO;
+               goto error;
+       }
+
+       vb_i2c->i2c.dev.parent = &pdev->dev;
+
+       /* attach to i2c layer */
+       i2c_add_adapter(&vb_i2c->i2c);
+
+       platform_set_drvdata(pdev, vb_i2c);
+
+       return 0;
+
+error:
+       kfree(vb_i2c);
+       return ret;
+}
+
+static int __devexit vprbrd_i2c_remove(struct platform_device *pdev)
+{
+       struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev);
+       int ret;
+
+       ret = i2c_del_adapter(&vb_i2c->i2c);
+       kfree(vb_i2c);
+
+       return ret;
+}
+
+static struct platform_driver vprbrd_i2c_driver = {
+       .driver.name    = "viperboard-i2c",
+       .driver.owner   = THIS_MODULE,
+       .probe          = vprbrd_i2c_probe,
+       .remove         = __devexit_p(vprbrd_i2c_remove),
+};
+
+static int __init vprbrd_i2c_init(void)
+{
+       switch (i2c_bus_freq) {
+       case 6000:
+               i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ;
+               break;
+       case 3000:
+               i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ;
+               break;
+       case 1000:
+               i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ;
+               break;
+       case 400:
+               i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ;
+               break;
+       case 200:
+               i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ;
+               break;
+       case 100:
+               i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+               break;
+       case 10:
+               i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ;
+               break;
+       default:
+               pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq);
+               i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ;
+       }
+
+       return platform_driver_register(&vprbrd_i2c_driver);
+}
+subsys_initcall(vprbrd_i2c_init);
+
+static void __exit vprbrd_i2c_exit(void)
+{
+       platform_driver_unregister(&vprbrd_i2c_driver);
+}
+module_exit(vprbrd_i2c_exit);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("I2C master driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-i2c");
index 961b8d0..fe822a1 100644 (file)
@@ -125,4 +125,18 @@ config TI_ADC081C
          This driver can also be built as a module. If so, the module will be
          called ti-adc081c.
 
+config TI_AM335X_ADC
+       tristate "TI's ADC driver"
+       depends on MFD_TI_AM335X_TSCADC
+       help
+         Say yes here to build support for the Texas Instruments ADC
+         driver, which is also an MFD client of the AM335x TSC/ADC.
+
+config VIPERBOARD_ADC
+       tristate "Viperboard ADC support"
+       depends on MFD_VIPERBOARD && USB
+       help
+         Say yes here to access the ADC part of the Nano River
+         Technologies Viperboard.
+
 endmenu
index 472fd7c..2d5f100 100644 (file)
@@ -13,4 +13,5 @@ obj-$(CONFIG_AT91_ADC) += at91_adc.o
 obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
 obj-$(CONFIG_MAX1363) += max1363.o
 obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
-
+obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
new file mode 100644 (file)
index 0000000..02a43c8
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * TI ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+struct tiadc_device {
+       struct ti_tscadc_dev *mfd_tscadc;
+       int channels;
+};
+
+static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
+{
+       return readl(adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
+                                       unsigned int val)
+{
+       writel(val, adc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void tiadc_step_config(struct tiadc_device *adc_dev)
+{
+       unsigned int stepconfig;
+       int i, channels = 0, steps;
+
+       /*
+        * There are 16 configurable steps and 8 analog input
+        * lines available which are shared between Touchscreen and ADC.
+        *
+        * Steps are assigned backwards, i.e. from 16 towards 0,
+        * depending on the number of input lines the ADC needs.
+        * The channel number selects which analog input is
+        * routed to the ADC for conversion.
+        */
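+       /*
+        * For example, with adc_dev->channels = 4 this programs steps
+        * 13..16 to sample inputs AIN4..AIN7.
+        */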
+
+       steps = TOTAL_STEPS - adc_dev->channels;
+       channels = TOTAL_CHANNELS - adc_dev->channels;
+
+       stepconfig = STEPCONFIG_AVG_16 | STEPCONFIG_FIFO1;
+
+       for (i = (steps + 1); i <= TOTAL_STEPS; i++) {
+               tiadc_writel(adc_dev, REG_STEPCONFIG(i),
+                               stepconfig | STEPCONFIG_INP(channels));
+               tiadc_writel(adc_dev, REG_STEPDELAY(i),
+                               STEPCONFIG_OPENDLY);
+               channels++;
+       }
+       tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+}
+
+static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
+{
+       struct iio_chan_spec *chan_array;
+       int i;
+
+       indio_dev->num_channels = channels;
+       chan_array = kcalloc(indio_dev->num_channels,
+                       sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+       if (chan_array == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < (indio_dev->num_channels); i++) {
+               struct iio_chan_spec *chan = chan_array + i;
+               chan->type = IIO_VOLTAGE;
+               chan->indexed = 1;
+               chan->channel = i;
+               chan->info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT;
+       }
+
+       indio_dev->channels = chan_array;
+
+       return indio_dev->num_channels;
+}
+
+static void tiadc_channels_remove(struct iio_dev *indio_dev)
+{
+       kfree(indio_dev->channels);
+}
+
+static int tiadc_read_raw(struct iio_dev *indio_dev,
+               struct iio_chan_spec const *chan,
+               int *val, int *val2, long mask)
+{
+       struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       int i;
+       unsigned int fifo1count, readx1;
+
+       /*
+        * When the sub-system is first enabled, the sequencer always
+        * starts with the lowest enabled step and runs through step 16.
+        * For example, if 4 ADC channels are enabled but only 1 of them
+        * is currently read, the sequencer still converts all 4 steps,
+        * producing 3 unwanted samples that have to be flushed out here.
+        */
+
+       fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
+       for (i = 0; i < fifo1count; i++) {
+               readx1 = tiadc_readl(adc_dev, REG_FIFO1);
+               if (i == chan->channel)
+                       *val = readx1 & 0xfff;
+       }
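+       /* re-enable the ADC steps for the next conversion */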
+       tiadc_writel(adc_dev, REG_SE, STPENB_STEPENB);
+
+       return IIO_VAL_INT;
+}
+
+static const struct iio_info tiadc_info = {
+       .read_raw = &tiadc_read_raw,
+};
+
+static int __devinit tiadc_probe(struct platform_device *pdev)
+{
+       struct iio_dev          *indio_dev;
+       struct tiadc_device     *adc_dev;
+       struct ti_tscadc_dev    *tscadc_dev = pdev->dev.platform_data;
+       struct mfd_tscadc_board *pdata;
+       int                     err;
+
+       pdata = tscadc_dev->dev->platform_data;
+       if (!pdata || !pdata->adc_init) {
+               dev_err(&pdev->dev, "Could not find platform data\n");
+               return -EINVAL;
+       }
+
+       indio_dev = iio_device_alloc(sizeof(struct tiadc_device));
+       if (indio_dev == NULL) {
+               dev_err(&pdev->dev, "failed to allocate iio device\n");
+               err = -ENOMEM;
+               goto err_ret;
+       }
+       adc_dev = iio_priv(indio_dev);
+
+       adc_dev->mfd_tscadc = tscadc_dev;
+       adc_dev->channels = pdata->adc_init->adc_channels;
+
+       indio_dev->dev.parent = &pdev->dev;
+       indio_dev->name = dev_name(&pdev->dev);
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->info = &tiadc_info;
+
+       tiadc_step_config(adc_dev);
+
+       err = tiadc_channel_init(indio_dev, adc_dev->channels);
+       if (err < 0)
+               goto err_free_device;
+
+       err = iio_device_register(indio_dev);
+       if (err)
+               goto err_free_channels;
+
+       platform_set_drvdata(pdev, indio_dev);
+
+       return 0;
+
+err_free_channels:
+       tiadc_channels_remove(indio_dev);
+err_free_device:
+       iio_device_free(indio_dev);
+err_ret:
+       return err;
+}
+
+static int __devexit tiadc_remove(struct platform_device *pdev)
+{
+       struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+       iio_device_unregister(indio_dev);
+       tiadc_channels_remove(indio_dev);
+
+       iio_device_free(indio_dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tiadc_suspend(struct device *dev)
+{
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+       unsigned int idle;
+
+       if (!device_may_wakeup(tscadc_dev->dev)) {
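+               /* not a wakeup source: gate the subsystem and power it down */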
+               idle = tiadc_readl(adc_dev, REG_CTRL);
+               idle &= ~(CNTRLREG_TSCSSENB);
+               tiadc_writel(adc_dev, REG_CTRL, (idle |
+                               CNTRLREG_POWERDOWN));
+       }
+
+       return 0;
+}
+
+static int tiadc_resume(struct device *dev)
+{
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct tiadc_device *adc_dev = iio_priv(indio_dev);
+       unsigned int restore;
+
+       /* Make sure ADC is powered up */
+       restore = tiadc_readl(adc_dev, REG_CTRL);
+       restore &= ~(CNTRLREG_POWERDOWN);
+       tiadc_writel(adc_dev, REG_CTRL, restore);
+
+       tiadc_step_config(adc_dev);
+
+       return 0;
+}
+
+static const struct dev_pm_ops tiadc_pm_ops = {
+       .suspend = tiadc_suspend,
+       .resume = tiadc_resume,
+};
+#define TIADC_PM_OPS (&tiadc_pm_ops)
+#else
+#define TIADC_PM_OPS NULL
+#endif
+
+static struct platform_driver tiadc_driver = {
+       .driver = {
+               .name   = "tiadc",
+               .owner  = THIS_MODULE,
+               .pm     = TIADC_PM_OPS,
+       },
+       .probe  = tiadc_probe,
+       .remove = __devexit_p(tiadc_remove),
+};
+
+module_platform_driver(tiadc_driver);
+
+MODULE_DESCRIPTION("TI ADC controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
new file mode 100644 (file)
index 0000000..10136a8
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ *  Nano River Technologies viperboard IIO ADC driver
+ *
+ *  (C) 2012 by Lemonage GmbH
+ *  Author: Lars Poeschel <poeschel@lemonage.de>
+ *  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/iio/iio.h>
+
+#include <linux/mfd/viperboard.h>
+
+#define VPRBRD_ADC_CMD_GET             0x00
+
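+/* 3-byte request/response payload exchanged over a USB control transfer */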
+struct vprbrd_adc_msg {
+       u8 cmd;
+       u8 chan;
+       u8 val;
+} __packed;
+
+struct vprbrd_adc {
+       struct vprbrd *vb;
+};
+
+#define VPRBRD_ADC_CHANNEL(_index) {                   \
+       .type = IIO_VOLTAGE,                            \
+       .indexed = 1,                                   \
+       .channel = _index,                              \
+       .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT,    \
+       .scan_index = _index,                           \
+       .scan_type = {                                  \
+               .sign = 'u',                            \
+               .realbits = 8,                          \
+               .storagebits = 8,                       \
+       },                                              \
+}
+
+static struct iio_chan_spec const vprbrd_adc_iio_channels[] = {
+       VPRBRD_ADC_CHANNEL(0),
+       VPRBRD_ADC_CHANNEL(1),
+       VPRBRD_ADC_CHANNEL(2),
+       VPRBRD_ADC_CHANNEL(3),
+};
+
+static int vprbrd_iio_read_raw(struct iio_dev *iio_dev,
+                               struct iio_chan_spec const *chan,
+                               int *val,
+                               int *val2,
+                               long info)
+{
+       int ret, error = 0;
+       struct vprbrd_adc *adc = iio_priv(iio_dev);
+       struct vprbrd *vb = adc->vb;
+       struct vprbrd_adc_msg *admsg = (struct vprbrd_adc_msg *)vb->buf;
+
+       switch (info) {
+       case IIO_CHAN_INFO_RAW:
+               mutex_lock(&vb->lock);
+
+               admsg->cmd = VPRBRD_ADC_CMD_GET;
+               admsg->chan = chan->scan_index;
+               admsg->val = 0x00;
+
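+               /*
+                * The same buffer carries the request out and the
+                * response back in; vb->lock serialises access to it.
+                */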
+               ret = usb_control_msg(vb->usb_dev,
+                       usb_sndctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+                       VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, admsg,
+                       sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+               if (ret != sizeof(struct vprbrd_adc_msg)) {
+                       dev_err(&iio_dev->dev, "usb send error on adc read\n");
+                       error = -EREMOTEIO;
+               }
+
+               ret = usb_control_msg(vb->usb_dev,
+                       usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_ADC,
+                       VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, admsg,
+                       sizeof(struct vprbrd_adc_msg), VPRBRD_USB_TIMEOUT_MS);
+
+               *val = admsg->val;
+
+               mutex_unlock(&vb->lock);
+
+               if (ret != sizeof(struct vprbrd_adc_msg)) {
+                       dev_err(&iio_dev->dev, "usb recv error on adc read\n");
+                       error = -EREMOTEIO;
+               }
+
+               if (error)
+                       goto error;
+
+               return IIO_VAL_INT;
+       default:
+               error = -EINVAL;
+               break;
+       }
+error:
+       return error;
+}
+
+static const struct iio_info vprbrd_adc_iio_info = {
+       .read_raw = &vprbrd_iio_read_raw,
+       .driver_module = THIS_MODULE,
+};
+
+static int __devinit vprbrd_adc_probe(struct platform_device *pdev)
+{
+       struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent);
+       struct vprbrd_adc *adc;
+       struct iio_dev *indio_dev;
+       int ret;
+
+       /* allocate and set up the IIO device */
+       indio_dev = iio_device_alloc(sizeof(*adc));
+       if (!indio_dev) {
+               dev_err(&pdev->dev, "failed allocating iio device\n");
+               return -ENOMEM;
+       }
+
+       adc = iio_priv(indio_dev);
+       adc->vb = vb;
+       indio_dev->name = "viperboard adc";
+       indio_dev->dev.parent = &pdev->dev;
+       indio_dev->info = &vprbrd_adc_iio_info;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = vprbrd_adc_iio_channels;
+       indio_dev->num_channels = ARRAY_SIZE(vprbrd_adc_iio_channels);
+
+       ret = iio_device_register(indio_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "could not register iio (adc)\n");
+               goto error;
+       }
+
+       platform_set_drvdata(pdev, indio_dev);
+
+       return 0;
+
+error:
+       iio_device_free(indio_dev);
+       return ret;
+}
+
+static int __devexit vprbrd_adc_remove(struct platform_device *pdev)
+{
+       struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+       iio_device_unregister(indio_dev);
+       iio_device_free(indio_dev);
+
+       return 0;
+}
+
+static struct platform_driver vprbrd_adc_driver = {
+       .driver = {
+               .name   = "viperboard-adc",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = vprbrd_adc_probe,
+       .remove         = __devexit_p(vprbrd_adc_remove),
+};
+
+module_platform_driver(vprbrd_adc_driver);
+
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_DESCRIPTION("IIO ADC driver for Nano River Techs Viperboard");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:viperboard-adc");
index cf23c46..c09d41b 100644 (file)
@@ -1269,7 +1269,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
                return ioctx;
 
        BUG_ON(ioctx->ch != ch);
-       kref_init(&ioctx->kref);
        spin_lock_init(&ioctx->spinlock);
        ioctx->state = SRPT_STATE_NEW;
        ioctx->n_rbuf = 0;
@@ -1291,39 +1290,6 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
 }
 
 /**
- * srpt_put_send_ioctx() - Free up resources.
- */
-static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
-{
-       struct srpt_rdma_ch *ch;
-       unsigned long flags;
-
-       BUG_ON(!ioctx);
-       ch = ioctx->ch;
-       BUG_ON(!ch);
-
-       WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
-
-       srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-       transport_generic_free_cmd(&ioctx->cmd, 0);
-
-       if (ioctx->n_rbuf > 1) {
-               kfree(ioctx->rbufs);
-               ioctx->rbufs = NULL;
-               ioctx->n_rbuf = 0;
-       }
-
-       spin_lock_irqsave(&ch->spinlock, flags);
-       list_add(&ioctx->free_list, &ch->free_list);
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-}
-
-static void srpt_put_send_ioctx_kref(struct kref *kref)
-{
-       srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
-}
-
-/**
  * srpt_abort_cmd() - Abort a SCSI command.
  * @ioctx:   I/O context associated with the SCSI command.
  * @context: Preferred execution context.
@@ -1359,8 +1325,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
        }
        spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-       if (state == SRPT_STATE_DONE)
+       if (state == SRPT_STATE_DONE) {
+               struct srpt_rdma_ch *ch = ioctx->ch;
+
+               BUG_ON(ch->sess == NULL);
+
+               target_put_sess_cmd(ch->sess, &ioctx->cmd);
                goto out;
+       }
 
        pr_debug("Aborting cmd with state %d and tag %lld\n", state,
                 ioctx->tag);
@@ -1395,11 +1367,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
                spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
                ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
                spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
-               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
                srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
                break;
        default:
                WARN_ON("ERROR: unexpected command state");
@@ -1457,11 +1429,13 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
                    && state != SRPT_STATE_DONE))
                pr_debug("state = %d\n", state);
 
-       if (state != SRPT_STATE_DONE)
-               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
-       else
+       if (state != SRPT_STATE_DONE) {
+               srpt_unmap_sg_to_ib_sge(ch, ioctx);
+               transport_generic_free_cmd(&ioctx->cmd, 0);
+       } else {
                printk(KERN_ERR "IB completion has been received too late for"
                       " wr_id = %u.\n", ioctx->ioctx.index);
+       }
 }
 
 /**
@@ -1712,10 +1686,10 @@ out_err:
 
 static int srpt_check_stop_free(struct se_cmd *cmd)
 {
-       struct srpt_send_ioctx *ioctx;
+       struct srpt_send_ioctx *ioctx = container_of(cmd,
+                               struct srpt_send_ioctx, cmd);
 
-       ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
-       return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+       return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
 }
 
 /**
@@ -1730,12 +1704,12 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
        uint64_t unpacked_lun;
        u64 data_len;
        enum dma_data_direction dir;
-       int ret;
+       sense_reason_t ret;
+       int rc;
 
        BUG_ON(!send_ioctx);
 
        srp_cmd = recv_ioctx->ioctx.buf;
-       kref_get(&send_ioctx->kref);
        cmd = &send_ioctx->cmd;
        send_ioctx->tag = srp_cmd->tag;
 
@@ -1755,40 +1729,26 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
                break;
        }
 
-       ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
-       if (ret) {
+       if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
                printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
                       srp_cmd->tag);
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+               ret = TCM_INVALID_CDB_FIELD;
                goto send_sense;
        }
 
-       cmd->data_length = data_len;
-       cmd->data_direction = dir;
        unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
                                       sizeof(srp_cmd->lun));
-       if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0) {
-               kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
+       rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+                       &send_ioctx->sense_data[0], unpacked_lun, data_len,
+                       MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+       if (rc != 0) {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto send_sense;
        }
-       ret = target_setup_cmd_from_cdb(cmd, srp_cmd->cdb);
-       if (ret < 0) {
-               kref_put(&send_ioctx->kref, srpt_put_send_ioctx_kref);
-               if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) {
-                       srpt_queue_status(cmd);
-                       return 0;
-               } else
-                       goto send_sense;
-       }
-
-       transport_handle_cdb_direct(cmd);
        return 0;
 
 send_sense:
-       transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
-                                                0);
+       transport_send_check_condition_and_sense(cmd, ret, 0);
        return -1;
 }
 
@@ -1865,9 +1825,11 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
 {
        struct srp_tsk_mgmt *srp_tsk;
        struct se_cmd *cmd;
+       struct se_session *sess = ch->sess;
        uint64_t unpacked_lun;
+       uint32_t tag = 0;
        int tcm_tmr;
-       int res;
+       int rc;
 
        BUG_ON(!send_ioctx);
 
@@ -1882,39 +1844,32 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
        send_ioctx->tag = srp_tsk->tag;
        tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
        if (tcm_tmr < 0) {
-               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                send_ioctx->cmd.se_tmr_req->response =
                        TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
-               goto process_tmr;
-       }
-       res = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
-       if (res < 0) {
-               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
-               goto process_tmr;
+               goto fail;
        }
-
        unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
                                       sizeof(srp_tsk->lun));
-       res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
-       if (res) {
-               pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
-               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
-               goto process_tmr;
-       }
-
-       if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
-               srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-
-process_tmr:
-       kref_get(&send_ioctx->kref);
-       if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
-               transport_generic_handle_tmr(&send_ioctx->cmd);
-       else
-               transport_send_check_condition_and_sense(cmd,
-                                               cmd->scsi_sense_reason, 0);
 
+       if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+               rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+               if (rc < 0) {
+                       send_ioctx->cmd.se_tmr_req->response =
+                                       TMR_TASK_DOES_NOT_EXIST;
+                       goto fail;
+               }
+               tag = srp_tsk->task_tag;
+       }
+       rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+                               srp_tsk, tcm_tmr, GFP_KERNEL, tag,
+                               TARGET_SCF_ACK_KREF);
+       if (rc != 0) {
+               send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+               goto fail;
+       }
+       return;
+fail:
+       transport_send_check_condition_and_sense(cmd, 0, 0); // XXX:
 }
 
 /**
@@ -1956,10 +1911,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
                }
        }
 
-       transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
-                             0, DMA_NONE, MSG_SIMPLE_TAG,
-                             send_ioctx->sense_data);
-
        switch (srp_cmd->opcode) {
        case SRP_CMD:
                srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
@@ -2365,6 +2316,7 @@ static void srpt_release_channel_work(struct work_struct *w)
 {
        struct srpt_rdma_ch *ch;
        struct srpt_device *sdev;
+       struct se_session *se_sess;
 
        ch = container_of(w, struct srpt_rdma_ch, release_work);
        pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
@@ -2373,8 +2325,13 @@ static void srpt_release_channel_work(struct work_struct *w)
        sdev = ch->sport->sdev;
        BUG_ON(!sdev);
 
-       transport_deregister_session_configfs(ch->sess);
-       transport_deregister_session(ch->sess);
+       se_sess = ch->sess;
+       BUG_ON(!se_sess);
+
+       target_wait_for_sess_cmds(se_sess, 0);
+
+       transport_deregister_session_configfs(se_sess);
+       transport_deregister_session(se_sess);
        ch->sess = NULL;
 
        srpt_destroy_ch_ib(ch);
@@ -3099,7 +3056,7 @@ static int srpt_queue_response(struct se_cmd *cmd)
                       ioctx->tag);
                srpt_unmap_sg_to_ib_sge(ch, ioctx);
                srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+               target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
        }
 
 out:
@@ -3490,6 +3447,23 @@ static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
 
 static void srpt_release_cmd(struct se_cmd *se_cmd)
 {
+       struct srpt_send_ioctx *ioctx = container_of(se_cmd,
+                               struct srpt_send_ioctx, cmd);
+       struct srpt_rdma_ch *ch = ioctx->ch;
+       unsigned long flags;
+
+       WARN_ON(ioctx->state != SRPT_STATE_DONE);
+       WARN_ON(ioctx->mapped_sg_count != 0);
+
+       if (ioctx->n_rbuf > 1) {
+               kfree(ioctx->rbufs);
+               ioctx->rbufs = NULL;
+               ioctx->n_rbuf = 0;
+       }
+
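+       /* hand the ioctx back to the channel's free list for reuse */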
+       spin_lock_irqsave(&ch->spinlock, flags);
+       list_add(&ioctx->free_list, &ch->free_list);
+       spin_unlock_irqrestore(&ch->spinlock, flags);
 }
 
 /**
index 61e52b8..4caf55c 100644 (file)
@@ -228,7 +228,6 @@ struct srpt_recv_ioctx {
 struct srpt_send_ioctx {
        struct srpt_ioctx       ioctx;
        struct srpt_rdma_ch     *ch;
-       struct kref              kref;
        struct rdma_iu          *rdma_ius;
        struct srp_direct_buf   *rbufs;
        struct srp_direct_buf   single_rbuf;
index 77629d3..febead4 100644 (file)
@@ -544,6 +544,7 @@ config KEYBOARD_OMAP
 
 config KEYBOARD_OMAP4
        tristate "TI OMAP4+ keypad support"
+       depends on ARCH_OMAP2PLUS
        select INPUT_MATRIXKMAP
        help
          Say Y here if you want to use the OMAP4+ keypad.
index 3c843cd..3be3acc 100644 (file)
@@ -24,7 +24,6 @@ struct da9052_onkey {
        struct da9052 *da9052;
        struct input_dev *input;
        struct delayed_work work;
-       unsigned int irq;
 };
 
 static void da9052_onkey_query(struct da9052_onkey *onkey)
@@ -76,7 +75,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
        struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
        struct da9052_onkey *onkey;
        struct input_dev *input_dev;
-       int irq;
        int error;
 
        if (!da9052) {
@@ -84,13 +82,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       irq = platform_get_irq_byname(pdev, "ONKEY");
-       if (irq < 0) {
-               dev_err(&pdev->dev,
-                       "Failed to get an IRQ for input device, %d\n", irq);
-               return -EINVAL;
-       }
-
        onkey = kzalloc(sizeof(*onkey), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!onkey || !input_dev) {
@@ -101,7 +92,6 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
 
        onkey->input = input_dev;
        onkey->da9052 = da9052;
-       onkey->irq = irq;
        INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work);
 
        input_dev->name = "da9052-onkey";
@@ -111,13 +101,11 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
        input_dev->evbit[0] = BIT_MASK(EV_KEY);
        __set_bit(KEY_POWER, input_dev->keybit);
 
-       error = request_threaded_irq(onkey->irq, NULL, da9052_onkey_irq,
-                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                    "ONKEY", onkey);
+       error = da9052_request_irq(onkey->da9052, DA9052_IRQ_NONKEY, "ONKEY",
+                           da9052_onkey_irq, onkey);
        if (error < 0) {
                dev_err(onkey->da9052->dev,
-                       "Failed to register ONKEY IRQ %d, error = %d\n",
-                       onkey->irq, error);
+                       "Failed to register ONKEY IRQ: %d\n", error);
                goto err_free_mem;
        }
 
@@ -132,7 +120,7 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
        return 0;
 
 err_free_irq:
-       free_irq(onkey->irq, onkey);
+       da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
        cancel_delayed_work_sync(&onkey->work);
 err_free_mem:
        input_free_device(input_dev);
@@ -145,7 +133,7 @@ static int __devexit da9052_onkey_remove(struct platform_device *pdev)
 {
        struct da9052_onkey *onkey = platform_get_drvdata(pdev);
 
-       free_irq(onkey->irq, onkey);
+       da9052_free_irq(onkey->da9052, DA9052_IRQ_NONKEY, onkey);
        cancel_delayed_work_sync(&onkey->work);
 
        input_unregister_device(onkey->input);
index f7668b2..0c45cad 100644 (file)
@@ -529,9 +529,9 @@ config TOUCHSCREEN_TOUCHWIN
          To compile this driver as a module, choose M here: the
          module will be called touchwin.
 
-config TOUCHSCREEN_TI_TSCADC
+config TOUCHSCREEN_TI_AM335X_TSC
        tristate "TI Touchscreen Interface"
-       depends on ARCH_OMAP2PLUS
+       depends on MFD_TI_AM335X_TSCADC
        help
          Say Y here if you have 4/5/8 wire touchscreen controller
          to be connected to the ADC controller on your TI AM335x SoC.
@@ -539,7 +539,7 @@ config TOUCHSCREEN_TI_TSCADC
          If unsure, say N.
 
          To compile this driver as a module, choose M here: the
-         module will be called ti_tscadc.
+         module will be called ti_am335x_tsc.
 
 config TOUCHSCREEN_ATMEL_TSADCC
        tristate "Atmel Touchscreen Interface"
index 178eb12..7c4c78e 100644 (file)
@@ -52,7 +52,7 @@ obj-$(CONFIG_TOUCHSCREEN_PIXCIR)      += pixcir_i2c_ts.o
 obj-$(CONFIG_TOUCHSCREEN_S3C2410)      += s3c2410_ts.o
 obj-$(CONFIG_TOUCHSCREEN_ST1232)       += st1232.o
 obj-$(CONFIG_TOUCHSCREEN_STMPE)                += stmpe-ts.o
-obj-$(CONFIG_TOUCHSCREEN_TI_TSCADC)    += ti_tscadc.o
+obj-$(CONFIG_TOUCHSCREEN_TI_AM335X_TSC)        += ti_am335x_tsc.o
 obj-$(CONFIG_TOUCHSCREEN_TNETV107X)    += tnetv107x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213)   += touchit213.o
 obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT)   += touchright.o
index e8df341..53133ef 100644 (file)
@@ -27,8 +27,6 @@ struct da9052_tsi {
        struct input_dev *dev;
        struct delayed_work ts_pen_work;
        struct mutex mutex;
-       unsigned int irq_pendwn;
-       unsigned int irq_datardy;
        bool stopped;
        bool adc_on;
 };
@@ -45,8 +43,8 @@ static irqreturn_t da9052_ts_pendwn_irq(int irq, void *data)
 
        if (!tsi->stopped) {
                /* Mask PEN_DOWN event and unmask TSI_READY event */
-               disable_irq_nosync(tsi->irq_pendwn);
-               enable_irq(tsi->irq_datardy);
+               da9052_disable_irq_nosync(tsi->da9052, DA9052_IRQ_PENDOWN);
+               da9052_enable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
 
                da9052_ts_adc_toggle(tsi, true);
 
@@ -137,8 +135,8 @@ static void da9052_ts_pen_work(struct work_struct *work)
                                return;
 
                        /* Mask TSI_READY event and unmask PEN_DOWN event */
-                       disable_irq(tsi->irq_datardy);
-                       enable_irq(tsi->irq_pendwn);
+                       da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
+                       da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
                }
        }
 }
@@ -197,7 +195,7 @@ static int da9052_ts_input_open(struct input_dev *input_dev)
        mb();
 
        /* Unmask PEN_DOWN event */
-       enable_irq(tsi->irq_pendwn);
+       da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
 
        /* Enable Pen Detect Circuit */
        return da9052_reg_update(tsi->da9052, DA9052_TSI_CONT_A_REG,
@@ -210,11 +208,11 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
 
        tsi->stopped = true;
        mb();
-       disable_irq(tsi->irq_pendwn);
+       da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
        cancel_delayed_work_sync(&tsi->ts_pen_work);
 
        if (tsi->adc_on) {
-               disable_irq(tsi->irq_datardy);
+               da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
                da9052_ts_adc_toggle(tsi, false);
 
                /*
@@ -222,7 +220,7 @@ static void da9052_ts_input_close(struct input_dev *input_dev)
                 * twice and we need to enable it to keep enable/disable
                 * counter balanced. IRQ is still off though.
                 */
-               enable_irq(tsi->irq_pendwn);
+               da9052_enable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
        }
 
        /* Disable Pen Detect Circuit */
@@ -234,21 +232,12 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
        struct da9052 *da9052;
        struct da9052_tsi *tsi;
        struct input_dev *input_dev;
-       int irq_pendwn;
-       int irq_datardy;
        int error;
 
        da9052 = dev_get_drvdata(pdev->dev.parent);
        if (!da9052)
                return -EINVAL;
 
-       irq_pendwn = platform_get_irq_byname(pdev, "PENDWN");
-       irq_datardy = platform_get_irq_byname(pdev, "TSIRDY");
-       if (irq_pendwn < 0 || irq_datardy < 0) {
-               dev_err(da9052->dev, "Unable to determine device interrupts\n");
-               return -ENXIO;
-       }
-
        tsi = kzalloc(sizeof(struct da9052_tsi), GFP_KERNEL);
        input_dev = input_allocate_device();
        if (!tsi || !input_dev) {
@@ -258,8 +247,6 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
 
        tsi->da9052 = da9052;
        tsi->dev = input_dev;
-       tsi->irq_pendwn = da9052->irq_base + irq_pendwn;
-       tsi->irq_datardy = da9052->irq_base + irq_datardy;
        tsi->stopped = true;
        INIT_DELAYED_WORK(&tsi->ts_pen_work, da9052_ts_pen_work);
 
@@ -287,31 +274,25 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
        /* Disable ADC */
        da9052_ts_adc_toggle(tsi, false);
 
-       error = request_threaded_irq(tsi->irq_pendwn,
-                                    NULL, da9052_ts_pendwn_irq,
-                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                    "PENDWN", tsi);
+       error = da9052_request_irq(tsi->da9052, DA9052_IRQ_PENDOWN,
+                               "pendown-irq", da9052_ts_pendwn_irq, tsi);
        if (error) {
                dev_err(tsi->da9052->dev,
-                       "Failed to register PENDWN IRQ %d, error = %d\n",
-                       tsi->irq_pendwn, error);
+                       "Failed to register PENDWN IRQ: %d\n", error);
                goto err_free_mem;
        }
 
-       error = request_threaded_irq(tsi->irq_datardy,
-                                    NULL, da9052_ts_datardy_irq,
-                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                    "TSIRDY", tsi);
+       error = da9052_request_irq(tsi->da9052, DA9052_IRQ_TSIREADY,
+                               "tsiready-irq", da9052_ts_datardy_irq, tsi);
        if (error) {
                dev_err(tsi->da9052->dev,
-                       "Failed to register TSIRDY IRQ %d, error = %d\n",
-                       tsi->irq_datardy, error);
+                       "Failed to register TSIRDY IRQ: %d\n", error);
                goto err_free_pendwn_irq;
        }
 
        /* Mask PEN_DOWN and TSI_READY events */
-       disable_irq(tsi->irq_pendwn);
-       disable_irq(tsi->irq_datardy);
+       da9052_disable_irq(tsi->da9052, DA9052_IRQ_PENDOWN);
+       da9052_disable_irq(tsi->da9052, DA9052_IRQ_TSIREADY);
 
        error = da9052_configure_tsi(tsi);
        if (error)
@@ -326,9 +307,9 @@ static int __devinit da9052_ts_probe(struct platform_device *pdev)
        return 0;
 
 err_free_datardy_irq:
-       free_irq(tsi->irq_datardy, tsi);
+       da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
 err_free_pendwn_irq:
-       free_irq(tsi->irq_pendwn, tsi);
+       da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
 err_free_mem:
        kfree(tsi);
        input_free_device(input_dev);
@@ -342,8 +323,8 @@ static int  __devexit da9052_ts_remove(struct platform_device *pdev)
 
        da9052_reg_write(tsi->da9052, DA9052_LDO9_REG, 0x19);
 
-       free_irq(tsi->irq_pendwn, tsi);
-       free_irq(tsi->irq_datardy, tsi);
+       da9052_free_irq(tsi->da9052, DA9052_IRQ_TSIREADY, tsi);
+       da9052_free_irq(tsi->da9052, DA9052_IRQ_PENDOWN, tsi);
 
        input_unregister_device(tsi->dev);
        kfree(tsi);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
new file mode 100644 (file)
index 0000000..7a18a8a
--- /dev/null
@@ -0,0 +1,398 @@
+/*
+ * TI Touch Screen driver
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/delay.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+
+#define ADCFSM_STEPID          0x10
+#define SEQ_SETTLE             275
+#define MAX_12BIT              ((1 << 12) - 1)
+
+struct titsc {
+       struct input_dev        *input;
+       struct ti_tscadc_dev    *mfd_tscadc;
+       unsigned int            irq;
+       unsigned int            wires;
+       unsigned int            x_plate_resistance;
+       bool                    pen_down;
+       int                     steps_to_configure;
+};
+
+static unsigned int titsc_readl(struct titsc *ts, unsigned int reg)
+{
+       return readl(ts->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_writel(struct titsc *tsc, unsigned int reg,
+                                       unsigned int val)
+{
+       writel(val, tsc->mfd_tscadc->tscadc_base + reg);
+}
+
+static void titsc_step_config(struct titsc *ts_dev)
+{
+       unsigned int    config;
+       int i, total_steps;
+
+       /* Configure the Step registers */
+       total_steps = 2 * ts_dev->steps_to_configure;
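+       /*
+        * Example: with steps_to_configure == 6, steps 1..6 sample
+        * the X coordinate, steps 7..12 the Y coordinate, and steps
+        * 13..14 measure Z1/Z2 for the pressure calculation.
+        */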
+
+       config = STEPCONFIG_MODE_HWSYNC |
+                       STEPCONFIG_AVG_16 | STEPCONFIG_XPP;
+       switch (ts_dev->wires) {
+       case 4:
+               config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+               break;
+       case 5:
+               config |= STEPCONFIG_YNN |
+                               STEPCONFIG_INP_AN4 | STEPCONFIG_XNN |
+                               STEPCONFIG_YPP;
+               break;
+       case 8:
+               config |= STEPCONFIG_INP_AN2 | STEPCONFIG_XNN;
+               break;
+       }
+
+       for (i = 1; i <= ts_dev->steps_to_configure; i++) {
+               titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+               titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+       }
+
+       config = 0;
+       config = STEPCONFIG_MODE_HWSYNC |
+                       STEPCONFIG_AVG_16 | STEPCONFIG_YNN |
+                       STEPCONFIG_INM_ADCREFM | STEPCONFIG_FIFO1;
+       switch (ts_dev->wires) {
+       case 4:
+               config |= STEPCONFIG_YPP;
+               break;
+       case 5:
+               config |= STEPCONFIG_XPP | STEPCONFIG_INP_AN4 |
+                               STEPCONFIG_XNP | STEPCONFIG_YPN;
+               break;
+       case 8:
+               config |= STEPCONFIG_YPP;
+               break;
+       }
+
+       for (i = (ts_dev->steps_to_configure + 1); i <= total_steps; i++) {
+               titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
+               titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+       }
+
+       config = 0;
+       /* Charge step configuration */
+       config = STEPCONFIG_XPP | STEPCONFIG_YNN |
+                       STEPCHARGE_RFP_XPUL | STEPCHARGE_RFM_XNUR |
+                       STEPCHARGE_INM_AN1 | STEPCHARGE_INP_AN1;
+
+       titsc_writel(ts_dev, REG_CHARGECONFIG, config);
+       titsc_writel(ts_dev, REG_CHARGEDELAY, CHARGEDLY_OPENDLY);
+
+       config = 0;
+       /* Configure to calculate pressure */
+       config = STEPCONFIG_MODE_HWSYNC |
+                       STEPCONFIG_AVG_16 | STEPCONFIG_YPP |
+                       STEPCONFIG_XNN | STEPCONFIG_INM_ADCREFM;
+       titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 1), config);
+       titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 1),
+                       STEPCONFIG_OPENDLY);
+
+       config |= STEPCONFIG_INP_AN3 | STEPCONFIG_FIFO1;
+       titsc_writel(ts_dev, REG_STEPCONFIG(total_steps + 2), config);
+       titsc_writel(ts_dev, REG_STEPDELAY(total_steps + 2),
+                       STEPCONFIG_OPENDLY);
+
+       titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+}
+
+static void titsc_read_coordinates(struct titsc *ts_dev,
+                                   unsigned int *x, unsigned int *y)
+{
+       unsigned int fifocount = titsc_readl(ts_dev, REG_FIFO0CNT);
+       unsigned int prev_val_x = ~0, prev_val_y = ~0;
+       unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
+       unsigned int read, diff;
+       unsigned int i, channel;
+
+       /*
+        * A delta filter is used to remove large variations in
+        * the values sampled from the ADC. Each new sample is
+        * compared with the previous one, and the sample whose
+        * difference from its predecessor is the smallest seen
+        * so far is the one reported to the input subsystem as
+        * the coordinate.
+        */
+       for (i = 0; i < fifocount - 1; i++) {
+               read = titsc_readl(ts_dev, REG_FIFO0);
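+               /* bits 16..19 of a FIFO word identify the step that
+                * produced it; bits 0..11 hold the 12-bit sample */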
+               channel = read & 0xf0000;
+               channel = channel >> 0x10;
+               if (channel < ts_dev->steps_to_configure) {
+                       read &= 0xfff;
+                       diff = abs(read - prev_val_x);
+                       if (diff < prev_diff_x) {
+                               prev_diff_x = diff;
+                               *x = read;
+                       }
+                       prev_val_x = read;
+               }
+
+               read = titsc_readl(ts_dev, REG_FIFO1);
+               channel = read & 0xf0000;
+               channel = channel >> 0x10;
+               if ((channel >= ts_dev->steps_to_configure) &&
+                       (channel < (2 * ts_dev->steps_to_configure - 1))) {
+                       read &= 0xfff;
+                       diff = abs(read - prev_val_y);
+                       if (diff < prev_diff_y) {
+                               prev_diff_y = diff;
+                               *y = read;
+                       }
+                       prev_val_y = read;
+               }
+       }
+}
+
+static irqreturn_t titsc_irq(int irq, void *dev)
+{
+       struct titsc *ts_dev = dev;
+       struct input_dev *input_dev = ts_dev->input;
+       unsigned int status, irqclr = 0;
+       unsigned int x = 0, y = 0;
+       unsigned int z1, z2, z;
+       unsigned int fsm;
+       unsigned int fifo1count, fifo0count;
+       int i;
+
+       status = titsc_readl(ts_dev, REG_IRQSTATUS);
+       if (status & IRQENB_FIFO0THRES) {
+               titsc_read_coordinates(ts_dev, &x, &y);
+
+               z1 = titsc_readl(ts_dev, REG_FIFO0) & 0xfff;
+               z2 = titsc_readl(ts_dev, REG_FIFO1) & 0xfff;
+
+               fifo1count = titsc_readl(ts_dev, REG_FIFO1CNT);
+               for (i = 0; i < fifo1count; i++)
+                       titsc_readl(ts_dev, REG_FIFO1);
+
+               fifo0count = titsc_readl(ts_dev, REG_FIFO0CNT);
+               for (i = 0; i < fifo0count; i++)
+                       titsc_readl(ts_dev, REG_FIFO0);
+
+               if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
+                       /*
+                        * Calculate pressure using the formula
+                        * Resistance(touch) = x-plate resistance *
+                        * (x position / 4096) * ((z2 / z1) - 1)
+                        */
+                       z = z2 - z1;
+                       z *= x;
+                       z *= ts_dev->x_plate_resistance;
+                       z /= z1;
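+                       /* scale back down: divide by 4096 with rounding */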
+                       z = (z + 2047) >> 12;
+
+                       if (z <= MAX_12BIT) {
+                               input_report_abs(input_dev, ABS_X, x);
+                               input_report_abs(input_dev, ABS_Y, y);
+                               input_report_abs(input_dev, ABS_PRESSURE, z);
+                               input_report_key(input_dev, BTN_TOUCH, 1);
+                               input_sync(input_dev);
+                       }
+               }
+               irqclr |= IRQENB_FIFO0THRES;
+       }
+
+       /*
+        * Give the sequencer time to settle so that its
+        * state reads back correctly.
+        */
+       udelay(SEQ_SETTLE);
+
+       status = titsc_readl(ts_dev, REG_RAWIRQSTATUS);
+       if (status & IRQENB_PENUP) {
+               /* Pen up event */
+               fsm = titsc_readl(ts_dev, REG_ADCFSM);
+               if (fsm == ADCFSM_STEPID) {
+                       ts_dev->pen_down = false;
+                       input_report_key(input_dev, BTN_TOUCH, 0);
+                       input_report_abs(input_dev, ABS_PRESSURE, 0);
+                       input_sync(input_dev);
+               } else {
+                       ts_dev->pen_down = true;
+               }
+               irqclr |= IRQENB_PENUP;
+       }
+
+       titsc_writel(ts_dev, REG_IRQSTATUS, irqclr);
+
+       titsc_writel(ts_dev, REG_SE, STPENB_STEPENB_TC);
+       return IRQ_HANDLED;
+}
+
+/*
+ * Module insertion/removal entry points.
+ */
+
+static int __devinit titsc_probe(struct platform_device *pdev)
+{
+       struct titsc *ts_dev;
+       struct input_dev *input_dev;
+       struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+       struct mfd_tscadc_board *pdata;
+       int err;
+
+       pdata = tscadc_dev->dev->platform_data;
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "Could not find platform data\n");
+               return -EINVAL;
+       }
+
+       /* Allocate memory for device */
+       ts_dev = kzalloc(sizeof(struct titsc), GFP_KERNEL);
+       input_dev = input_allocate_device();
+       if (!ts_dev || !input_dev) {
+               dev_err(&pdev->dev, "failed to allocate memory.\n");
+               err = -ENOMEM;
+               goto err_free_mem;
+       }
+
+       tscadc_dev->tsc = ts_dev;
+       ts_dev->mfd_tscadc = tscadc_dev;
+       ts_dev->input = input_dev;
+       ts_dev->irq = tscadc_dev->irq;
+       ts_dev->wires = pdata->tsc_init->wires;
+       ts_dev->x_plate_resistance = pdata->tsc_init->x_plate_resistance;
+       ts_dev->steps_to_configure = pdata->tsc_init->steps_to_configure;
+
+       err = request_irq(ts_dev->irq, titsc_irq,
+                         0, pdev->dev.driver->name, ts_dev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to allocate irq.\n");
+               goto err_free_mem;
+       }
+
+       titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
+       titsc_step_config(ts_dev);
+       titsc_writel(ts_dev, REG_FIFO0THR, ts_dev->steps_to_configure);
+
+       input_dev->name = "ti-tsc";
+       input_dev->dev.parent = &pdev->dev;
+
+       input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+       input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+       input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
+       input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
+       input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
+
+       /* register to the input system */
+       err = input_register_device(input_dev);
+       if (err)
+               goto err_free_irq;
+
+       platform_set_drvdata(pdev, ts_dev);
+       return 0;
+
+err_free_irq:
+       free_irq(ts_dev->irq, ts_dev);
+err_free_mem:
+       input_free_device(input_dev);
+       kfree(ts_dev);
+       return err;
+}
+
+static int __devexit titsc_remove(struct platform_device *pdev)
+{
+       struct ti_tscadc_dev *tscadc_dev = pdev->dev.platform_data;
+       struct titsc *ts_dev = tscadc_dev->tsc;
+
+       free_irq(ts_dev->irq, ts_dev);
+
+       input_unregister_device(ts_dev->input);
+
+       platform_set_drvdata(pdev, NULL);
+       kfree(ts_dev);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int titsc_suspend(struct device *dev)
+{
+       struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+       struct titsc *ts_dev = tscadc_dev->tsc;
+       unsigned int idle;
+
+       if (device_may_wakeup(tscadc_dev->dev)) {
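+               /* keep the pen-down hardware event armed as a wakeup source */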
+               idle = titsc_readl(ts_dev, REG_IRQENABLE);
+               titsc_writel(ts_dev, REG_IRQENABLE,
+                               (idle | IRQENB_HW_PEN));
+               titsc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
+       }
+       return 0;
+}
+
+static int titsc_resume(struct device *dev)
+{
+       struct ti_tscadc_dev *tscadc_dev = dev->platform_data;
+       struct titsc *ts_dev = tscadc_dev->tsc;
+
+       if (device_may_wakeup(tscadc_dev->dev)) {
+               titsc_writel(ts_dev, REG_IRQWAKEUP,
+                               0x00);
+               titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
+       }
+       titsc_step_config(ts_dev);
+       titsc_writel(ts_dev, REG_FIFO0THR,
+                       ts_dev->steps_to_configure);
+       return 0;
+}
+
+static const struct dev_pm_ops titsc_pm_ops = {
+       .suspend = titsc_suspend,
+       .resume  = titsc_resume,
+};
+#define TITSC_PM_OPS (&titsc_pm_ops)
+#else
+#define TITSC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tsc_driver = {
+       .probe  = titsc_probe,
+       .remove = __devexit_p(titsc_remove),
+       .driver = {
+               .name   = "tsc",
+               .owner  = THIS_MODULE,
+               .pm     = TITSC_PM_OPS,
+       },
+};
+module_platform_driver(ti_tsc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/ti_tscadc.c b/drivers/input/touchscreen/ti_tscadc.c
deleted file mode 100644 (file)
index d229c74..0000000
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * TI Touch Screen driver
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/input/ti_tscadc.h>
-#include <linux/delay.h>
-
-#define REG_IRQEOI             0x020
-#define REG_RAWIRQSTATUS       0x024
-#define REG_IRQSTATUS          0x028
-#define REG_IRQENABLE          0x02C
-#define REG_IRQWAKEUP          0x034
-#define REG_CTRL               0x040
-#define REG_ADCFSM             0x044
-#define REG_CLKDIV             0x04C
-#define REG_SE                 0x054
-#define REG_IDLECONFIG         0x058
-#define REG_CHARGECONFIG       0x05C
-#define REG_CHARGEDELAY                0x060
-#define REG_STEPCONFIG(n)      (0x64 + ((n - 1) * 8))
-#define REG_STEPDELAY(n)       (0x68 + ((n - 1) * 8))
-#define REG_STEPCONFIG13       0x0C4
-#define REG_STEPDELAY13                0x0C8
-#define REG_STEPCONFIG14       0x0CC
-#define REG_STEPDELAY14                0x0D0
-#define REG_FIFO0CNT           0xE4
-#define REG_FIFO1THR           0xF4
-#define REG_FIFO0              0x100
-#define REG_FIFO1              0x200
-
-/*     Register Bitfields      */
-#define IRQWKUP_ENB            BIT(0)
-#define STPENB_STEPENB         0x7FFF
-#define IRQENB_FIFO1THRES      BIT(5)
-#define IRQENB_PENUP           BIT(9)
-#define STEPCONFIG_MODE_HWSYNC 0x2
-#define STEPCONFIG_SAMPLES_AVG (1 << 4)
-#define STEPCONFIG_XPP         (1 << 5)
-#define STEPCONFIG_XNN         (1 << 6)
-#define STEPCONFIG_YPP         (1 << 7)
-#define STEPCONFIG_YNN         (1 << 8)
-#define STEPCONFIG_XNP         (1 << 9)
-#define STEPCONFIG_YPN         (1 << 10)
-#define STEPCONFIG_INM         (1 << 18)
-#define STEPCONFIG_INP         (1 << 20)
-#define STEPCONFIG_INP_5       (1 << 21)
-#define STEPCONFIG_FIFO1       (1 << 26)
-#define STEPCONFIG_OPENDLY     0xff
-#define STEPCONFIG_Z1          (3 << 19)
-#define STEPIDLE_INP           (1 << 22)
-#define STEPCHARGE_RFP         (1 << 12)
-#define STEPCHARGE_INM         (1 << 15)
-#define STEPCHARGE_INP         (1 << 19)
-#define STEPCHARGE_RFM         (1 << 23)
-#define STEPCHARGE_DELAY       0x1
-#define CNTRLREG_TSCSSENB      (1 << 0)
-#define CNTRLREG_STEPID                (1 << 1)
-#define CNTRLREG_STEPCONFIGWRT (1 << 2)
-#define CNTRLREG_4WIRE         (1 << 5)
-#define CNTRLREG_5WIRE         (1 << 6)
-#define CNTRLREG_8WIRE         (3 << 5)
-#define CNTRLREG_TSCENB                (1 << 7)
-#define ADCFSM_STEPID          0x10
-
-#define SEQ_SETTLE             275
-#define ADC_CLK                        3000000
-#define MAX_12BIT              ((1 << 12) - 1)
-#define TSCADC_DELTA_X         15
-#define TSCADC_DELTA_Y         15
-
-struct tscadc {
-       struct input_dev        *input;
-       struct clk              *tsc_ick;
-       void __iomem            *tsc_base;
-       unsigned int            irq;
-       unsigned int            wires;
-       unsigned int            x_plate_resistance;
-       bool                    pen_down;
-};
-
-static unsigned int tscadc_readl(struct tscadc *ts, unsigned int reg)
-{
-       return readl(ts->tsc_base + reg);
-}
-
-static void tscadc_writel(struct tscadc *tsc, unsigned int reg,
-                                       unsigned int val)
-{
-       writel(val, tsc->tsc_base + reg);
-}
-
-static void tscadc_step_config(struct tscadc *ts_dev)
-{
-       unsigned int    config;
-       int i;
-
-       /* Configure the Step registers */
-
-       config = STEPCONFIG_MODE_HWSYNC |
-                       STEPCONFIG_SAMPLES_AVG | STEPCONFIG_XPP;
-       switch (ts_dev->wires) {
-       case 4:
-               config |= STEPCONFIG_INP | STEPCONFIG_XNN;
-               break;
-       case 5:
-               config |= STEPCONFIG_YNN |
-                               STEPCONFIG_INP_5 | STEPCONFIG_XNN |
-                               STEPCONFIG_YPP;
-               break;
-       case 8:
-               config |= STEPCONFIG_INP | STEPCONFIG_XNN;
-               break;
-       }
-
-       for (i = 1; i < 7; i++) {
-               tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
-               tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
-       }
-
-       config = 0;
-       config = STEPCONFIG_MODE_HWSYNC |
-                       STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YNN |
-                       STEPCONFIG_INM | STEPCONFIG_FIFO1;
-       switch (ts_dev->wires) {
-       case 4:
-               config |= STEPCONFIG_YPP;
-               break;
-       case 5:
-               config |= STEPCONFIG_XPP | STEPCONFIG_INP_5 |
-                               STEPCONFIG_XNP | STEPCONFIG_YPN;
-               break;
-       case 8:
-               config |= STEPCONFIG_YPP;
-               break;
-       }
-
-       for (i = 7; i < 13; i++) {
-               tscadc_writel(ts_dev, REG_STEPCONFIG(i), config);
-               tscadc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
-       }
-
-       config = 0;
-       /* Charge step configuration */
-       config = STEPCONFIG_XPP | STEPCONFIG_YNN |
-                       STEPCHARGE_RFP | STEPCHARGE_RFM |
-                       STEPCHARGE_INM | STEPCHARGE_INP;
-
-       tscadc_writel(ts_dev, REG_CHARGECONFIG, config);
-       tscadc_writel(ts_dev, REG_CHARGEDELAY, STEPCHARGE_DELAY);
-
-       config = 0;
-       /* Configure to calculate pressure */
-       config = STEPCONFIG_MODE_HWSYNC |
-                       STEPCONFIG_SAMPLES_AVG | STEPCONFIG_YPP |
-                       STEPCONFIG_XNN | STEPCONFIG_INM;
-       tscadc_writel(ts_dev, REG_STEPCONFIG13, config);
-       tscadc_writel(ts_dev, REG_STEPDELAY13, STEPCONFIG_OPENDLY);
-
-       config |= STEPCONFIG_Z1 | STEPCONFIG_FIFO1;
-       tscadc_writel(ts_dev, REG_STEPCONFIG14, config);
-       tscadc_writel(ts_dev, REG_STEPDELAY14, STEPCONFIG_OPENDLY);
-
-       tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
-}
-
-static void tscadc_idle_config(struct tscadc *ts_config)
-{
-       unsigned int idleconfig;
-
-       idleconfig = STEPCONFIG_YNN |
-                       STEPCONFIG_INM |
-                       STEPCONFIG_YPN | STEPIDLE_INP;
-       tscadc_writel(ts_config, REG_IDLECONFIG, idleconfig);
-}
-
-static void tscadc_read_coordinates(struct tscadc *ts_dev,
-                                   unsigned int *x, unsigned int *y)
-{
-       unsigned int fifocount = tscadc_readl(ts_dev, REG_FIFO0CNT);
-       unsigned int prev_val_x = ~0, prev_val_y = ~0;
-       unsigned int prev_diff_x = ~0, prev_diff_y = ~0;
-       unsigned int read, diff;
-       unsigned int i;
-
-       /*
-        * Delta filter is used to remove large variations in sampled
-        * values from ADC. The filter tries to predict where the next
-        * coordinate could be. This is done by taking a previous
-        * coordinate and subtracting it form current one. Further the
-        * algorithm compares the difference with that of a present value,
-        * if true the value is reported to the sub system.
-        */
-       for (i = 0; i < fifocount - 1; i++) {
-               read = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
-               diff = abs(read - prev_val_x);
-               if (diff < prev_diff_x) {
-                       prev_diff_x = diff;
-                       *x = read;
-               }
-               prev_val_x = read;
-
-               read = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
-               diff = abs(read - prev_val_y);
-               if (diff < prev_diff_y) {
-                       prev_diff_y = diff;
-                       *y = read;
-               }
-               prev_val_y = read;
-       }
-}
-
-static irqreturn_t tscadc_irq(int irq, void *dev)
-{
-       struct tscadc *ts_dev = dev;
-       struct input_dev *input_dev = ts_dev->input;
-       unsigned int status, irqclr = 0;
-       unsigned int x = 0, y = 0;
-       unsigned int z1, z2, z;
-       unsigned int fsm;
-
-       status = tscadc_readl(ts_dev, REG_IRQSTATUS);
-       if (status & IRQENB_FIFO1THRES) {
-               tscadc_read_coordinates(ts_dev, &x, &y);
-
-               z1 = tscadc_readl(ts_dev, REG_FIFO0) & 0xfff;
-               z2 = tscadc_readl(ts_dev, REG_FIFO1) & 0xfff;
-
-               if (ts_dev->pen_down && z1 != 0 && z2 != 0) {
-                       /*
-                        * Calculate pressure using formula
-                        * Resistance(touch) = x plate resistance *
-                        * x postion/4096 * ((z2 / z1) - 1)
-                        */
-                       z = z2 - z1;
-                       z *= x;
-                       z *= ts_dev->x_plate_resistance;
-                       z /= z1;
-                       z = (z + 2047) >> 12;
-
-                       if (z <= MAX_12BIT) {
-                               input_report_abs(input_dev, ABS_X, x);
-                               input_report_abs(input_dev, ABS_Y, y);
-                               input_report_abs(input_dev, ABS_PRESSURE, z);
-                               input_report_key(input_dev, BTN_TOUCH, 1);
-                               input_sync(input_dev);
-                       }
-               }
-               irqclr |= IRQENB_FIFO1THRES;
-       }
-
-       /*
-        * Allow the sequencer time to settle so that we
-        * read its correct state.
-        */
-       udelay(SEQ_SETTLE);
-
-       status = tscadc_readl(ts_dev, REG_RAWIRQSTATUS);
-       if (status & IRQENB_PENUP) {
-               /* Pen up event */
-               fsm = tscadc_readl(ts_dev, REG_ADCFSM);
-               if (fsm == ADCFSM_STEPID) {
-                       ts_dev->pen_down = false;
-                       input_report_key(input_dev, BTN_TOUCH, 0);
-                       input_report_abs(input_dev, ABS_PRESSURE, 0);
-                       input_sync(input_dev);
-               } else {
-                       ts_dev->pen_down = true;
-               }
-               irqclr |= IRQENB_PENUP;
-       }
-
-       tscadc_writel(ts_dev, REG_IRQSTATUS, irqclr);
-       /* check pending interrupts */
-       tscadc_writel(ts_dev, REG_IRQEOI, 0x0);
-
-       tscadc_writel(ts_dev, REG_SE, STPENB_STEPENB);
-       return IRQ_HANDLED;
-}
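
A small sketch tracing the pressure arithmetic from the handler above; the 200-ohm plate resistance and the x, z1, z2 samples are invented values chosen only to walk the integer math:

#include <stdio.h>

int main(void)
{
        unsigned int x_plate_resistance = 200;  /* ohms, assumed */
        unsigned int x = 2048, z1 = 400, z2 = 600;
        unsigned int z;

        /* Rtouch = Rx * (x / 4096) * ((z2 / z1) - 1), kept in integers */
        z = z2 - z1;                    /* 200      */
        z *= x;                         /* 409600   */
        z *= x_plate_resistance;        /* 81920000 */
        z /= z1;                        /* 204800   */
        z = (z + 2047) >> 12;           /* round-divide by 4096 -> 50 */

        printf("touch resistance ~%u ohms\n", z);
        return 0;
}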
-
-/*
- * The functions for inserting/removing the driver as a module.
- */
-
-static int __devinit tscadc_probe(struct platform_device *pdev)
-{
-       const struct tsc_data *pdata = pdev->dev.platform_data;
-       struct resource *res;
-       struct tscadc *ts_dev;
-       struct input_dev *input_dev;
-       struct clk *clk;
-       int err;
-       int clk_value, ctrl, irq;
-
-       if (!pdata) {
-               dev_err(&pdev->dev, "missing platform data.\n");
-               return -EINVAL;
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no memory resource defined.\n");
-               return -EINVAL;
-       }
-
-       irq = platform_get_irq(pdev, 0);
-       if (irq < 0) {
-               dev_err(&pdev->dev, "no irq ID is specified.\n");
-               return -EINVAL;
-       }
-
-       /* Allocate memory for device */
-       ts_dev = kzalloc(sizeof(struct tscadc), GFP_KERNEL);
-       input_dev = input_allocate_device();
-       if (!ts_dev || !input_dev) {
-               dev_err(&pdev->dev, "failed to allocate memory.\n");
-               err = -ENOMEM;
-               goto err_free_mem;
-       }
-
-       ts_dev->input = input_dev;
-       ts_dev->irq = irq;
-       ts_dev->wires = pdata->wires;
-       ts_dev->x_plate_resistance = pdata->x_plate_resistance;
-
-       res = request_mem_region(res->start, resource_size(res), pdev->name);
-       if (!res) {
-               dev_err(&pdev->dev, "failed to reserve registers.\n");
-               err = -EBUSY;
-               goto err_free_mem;
-       }
-
-       ts_dev->tsc_base = ioremap(res->start, resource_size(res));
-       if (!ts_dev->tsc_base) {
-               dev_err(&pdev->dev, "failed to map registers.\n");
-               err = -ENOMEM;
-               goto err_release_mem_region;
-       }
-
-       err = request_irq(ts_dev->irq, tscadc_irq,
-                         0, pdev->dev.driver->name, ts_dev);
-       if (err) {
-               dev_err(&pdev->dev, "failed to allocate irq.\n");
-               goto err_unmap_regs;
-       }
-
-       ts_dev->tsc_ick = clk_get(&pdev->dev, "adc_tsc_ick");
-       if (IS_ERR(ts_dev->tsc_ick)) {
-               dev_err(&pdev->dev, "failed to get TSC ick\n");
-               goto err_free_irq;
-       }
-       clk_enable(ts_dev->tsc_ick);
-
-       clk = clk_get(&pdev->dev, "adc_tsc_fck");
-       if (IS_ERR(clk)) {
-               dev_err(&pdev->dev, "failed to get TSC fck\n");
-               err = PTR_ERR(clk);
-               goto err_disable_clk;
-       }
-
-       clk_value = clk_get_rate(clk) / ADC_CLK;
-       clk_put(clk);
-
-       if (clk_value < 7) {
-               dev_err(&pdev->dev, "clock input less than min clock requirement\n");
-               goto err_disable_clk;
-       }
-       /* CLKDIV needs to be configured to the value minus 1 */
-       tscadc_writel(ts_dev, REG_CLKDIV, clk_value - 1);
-
-       /* Enable wake-up of the SoC using the touchscreen */
-       tscadc_writel(ts_dev, REG_IRQWAKEUP, IRQWKUP_ENB);
-
-       ctrl = CNTRLREG_STEPCONFIGWRT |
-                       CNTRLREG_TSCENB |
-                       CNTRLREG_STEPID;
-       switch (ts_dev->wires) {
-       case 4:
-               ctrl |= CNTRLREG_4WIRE;
-               break;
-       case 5:
-               ctrl |= CNTRLREG_5WIRE;
-               break;
-       case 8:
-               ctrl |= CNTRLREG_8WIRE;
-               break;
-       }
-       tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
-       tscadc_idle_config(ts_dev);
-       tscadc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);
-       tscadc_step_config(ts_dev);
-       tscadc_writel(ts_dev, REG_FIFO1THR, 6);
-
-       ctrl |= CNTRLREG_TSCSSENB;
-       tscadc_writel(ts_dev, REG_CTRL, ctrl);
-
-       input_dev->name = "ti-tsc-adc";
-       input_dev->dev.parent = &pdev->dev;
-
-       input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-       input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
-       input_set_abs_params(input_dev, ABS_X, 0, MAX_12BIT, 0, 0);
-       input_set_abs_params(input_dev, ABS_Y, 0, MAX_12BIT, 0, 0);
-       input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_12BIT, 0, 0);
-
-       /* register to the input system */
-       err = input_register_device(input_dev);
-       if (err)
-               goto err_disable_clk;
-
-       platform_set_drvdata(pdev, ts_dev);
-       return 0;
-
-err_disable_clk:
-       clk_disable(ts_dev->tsc_ick);
-       clk_put(ts_dev->tsc_ick);
-err_free_irq:
-       free_irq(ts_dev->irq, ts_dev);
-err_unmap_regs:
-       iounmap(ts_dev->tsc_base);
-err_release_mem_region:
-       release_mem_region(res->start, resource_size(res));
-err_free_mem:
-       input_free_device(input_dev);
-       kfree(ts_dev);
-       return err;
-}
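
A quick sketch of the CLKDIV computation done in the probe above; the 24 MHz functional-clock rate and the 3 MHz target are assumed values, since the real ones come from clk_get_rate() and the driver's ADC_CLK constant:

#include <stdio.h>

int main(void)
{
        unsigned long fck_rate = 24000000;      /* assumed clk_get_rate() */
        unsigned long adc_clk = 3000000;        /* assumed ADC_CLK */
        unsigned long clk_value = fck_rate / adc_clk;   /* 8 */

        if (clk_value < 7) {
                fprintf(stderr, "clock input below minimum requirement\n");
                return 1;
        }
        /* The register takes the divider minus one. */
        printf("REG_CLKDIV <- %lu\n", clk_value - 1);   /* prints 7 */
        return 0;
}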
-
-static int __devexit tscadc_remove(struct platform_device *pdev)
-{
-       struct tscadc *ts_dev = platform_get_drvdata(pdev);
-       struct resource *res;
-
-       free_irq(ts_dev->irq, ts_dev);
-
-       input_unregister_device(ts_dev->input);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iounmap(ts_dev->tsc_base);
-       release_mem_region(res->start, resource_size(res));
-
-       clk_disable(ts_dev->tsc_ick);
-       clk_put(ts_dev->tsc_ick);
-
-       kfree(ts_dev);
-
-       platform_set_drvdata(pdev, NULL);
-       return 0;
-}
-
-static struct platform_driver ti_tsc_driver = {
-       .probe  = tscadc_probe,
-       .remove = __devexit_p(tscadc_remove),
-       .driver = {
-               .name   = "tsc",
-               .owner  = THIS_MODULE,
-       },
-};
-module_platform_driver(ti_tsc_driver);
-
-MODULE_DESCRIPTION("TI touchscreen controller driver");
-MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
-MODULE_LICENSE("GPL");
index b63987c..1c0abd4 100644 (file)
@@ -104,6 +104,17 @@ config MFD_TI_SSP
          To compile this driver as a module, choose M here: the
          module will be called ti-ssp.
 
+config MFD_TI_AM335X_TSCADC
+       tristate "TI ADC / Touch Screen chip support"
+       select MFD_CORE
+       select REGMAP
+       select REGMAP_MMIO
+       help
+         If you say yes here you get support for the Texas Instruments
+         series of touch screen / ADC chips.
+         To compile this driver as a module, choose M here: the
+         module will be called ti_am335x_tscadc.
+
 config HTC_EGPIO
        bool "HTC EGPIO support"
        depends on GENERIC_HARDIRQS && GPIOLIB && ARM
@@ -253,6 +264,20 @@ config MFD_TPS65912_SPI
          If you say yes here you get support for the TPS65912 series of
          PM chips with SPI interface.
 
+config MFD_TPS80031
+       bool "TI TPS80031/TPS80032 Power Management chips"
+       depends on I2C=y && GENERIC_HARDIRQS
+       select MFD_CORE
+       select REGMAP_I2C
+       select REGMAP_IRQ
+       help
+         If you say yes here you get support for the Texas Instruments
+         TPS80031/TPS80032 Fully Integrated Power Management chips with
+         Power Path and Battery Charger. The device provides five
+         configurable step-down converters, 11 general-purpose LDOs, a
+         USB OTG module, an ADC, an RTC, 2 PWMs, and a System Voltage
+         Regulator/Battery Charger with Power Path from USB, plus a 32K
+         clock generator.
+
 config MENELAUS
        bool "Texas Instruments TWL92330/Menelaus PM chip"
        depends on I2C=y && ARCH_OMAP2
@@ -309,10 +334,10 @@ config MFD_TWL4030_AUDIO
 
 config TWL6040_CORE
        bool "Support for TWL6040 audio codec"
-       depends on I2C=y && GENERIC_HARDIRQS
+       depends on I2C=y
        select MFD_CORE
        select REGMAP_I2C
-       select IRQ_DOMAIN
+       select REGMAP_IRQ
        default n
        help
          Say yes here if you want support for Texas Instruments TWL6040 audio
@@ -990,6 +1015,7 @@ config MFD_TPS65090
        depends on I2C=y && GENERIC_HARDIRQS
        select MFD_CORE
        select REGMAP_I2C
+       select REGMAP_IRQ
        help
          If you say yes here you get support for the TPS65090 series of
          Power Management chips.
@@ -1034,6 +1060,7 @@ config MFD_STA2X11
        bool "STA2X11 multi function device support"
        depends on STA2X11
        select MFD_CORE
+       select REGMAP_MMIO
 
 config MFD_SYSCON
        bool "System Controller Register R/W Based on Regmap"
@@ -1053,6 +1080,38 @@ config MFD_PALMAS
          If you say yes here you get support for the Palmas
          series of PMIC chips from Texas Instruments.
 
+config MFD_VIPERBOARD
+       tristate "Support for Nano River Technologies Viperboard"
+       select MFD_CORE
+       depends on USB
+       default n
+       help
+         Say yes here if you want support for the Nano River Technologies
+         Viperboard.
+         There are MFD cell drivers available for the I2C master, the ADC
+         and both GPIOs found on the board. The SPI part does not yet
+         have a driver.
+         You need to select the MFD cell drivers separately.
+         The drivers do not support all features the board exposes.
+
+config MFD_RETU
+       tristate "Support for Retu multi-function device"
+       select MFD_CORE
+       depends on I2C
+       select REGMAP_IRQ
+       help
+         Retu is a multi-function device found on Nokia Internet Tablets
+         (770, N800 and N810).
+
+config MFD_AS3711
+       bool "Support for AS3711"
+       select MFD_CORE
+       select REGMAP_I2C
+       select REGMAP_IRQ
+       depends on I2C=y
+       help
+         Support for the AS3711 PMIC from AMS.
+
 endmenu
 endif
 
index 69f260a..8b977f8 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_HTC_I2CPLD)      += htc-i2cpld.o
 obj-$(CONFIG_MFD_DAVINCI_VOICECODEC)   += davinci_voicecodec.o
 obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
 obj-$(CONFIG_MFD_TI_SSP)       += ti-ssp.o
+obj-$(CONFIG_MFD_TI_AM335X_TSCADC)     += ti_am335x_tscadc.o
 
 obj-$(CONFIG_MFD_STA2X11)      += sta2x11-mfd.o
 obj-$(CONFIG_MFD_STMPE)                += stmpe.o
@@ -55,18 +56,19 @@ obj-$(CONFIG_TPS6105X)              += tps6105x.o
 obj-$(CONFIG_TPS65010)         += tps65010.o
 obj-$(CONFIG_TPS6507X)         += tps6507x.o
 obj-$(CONFIG_MFD_TPS65217)     += tps65217.o
-obj-$(CONFIG_MFD_TPS65910)     += tps65910.o tps65910-irq.o
+obj-$(CONFIG_MFD_TPS65910)     += tps65910.o
 tps65912-objs                   := tps65912-core.o tps65912-irq.o
 obj-$(CONFIG_MFD_TPS65912)     += tps65912.o
 obj-$(CONFIG_MFD_TPS65912_I2C) += tps65912-i2c.o
 obj-$(CONFIG_MFD_TPS65912_SPI)  += tps65912-spi.o
+obj-$(CONFIG_MFD_TPS80031)     += tps80031.o
 obj-$(CONFIG_MENELAUS)         += menelaus.o
 
 obj-$(CONFIG_TWL4030_CORE)     += twl-core.o twl4030-irq.o twl6030-irq.o
 obj-$(CONFIG_TWL4030_MADC)      += twl4030-madc.o
 obj-$(CONFIG_TWL4030_POWER)    += twl4030-power.o
 obj-$(CONFIG_MFD_TWL4030_AUDIO)        += twl4030-audio.o
-obj-$(CONFIG_TWL6040_CORE)     += twl6040-core.o twl6040-irq.o
+obj-$(CONFIG_TWL6040_CORE)     += twl6040.o
 
 obj-$(CONFIG_MFD_MC13XXX)      += mc13xxx-core.o
 obj-$(CONFIG_MFD_MC13XXX_SPI)  += mc13xxx-spi.o
@@ -89,6 +91,7 @@ obj-$(CONFIG_UCB1400_CORE)    += ucb1400_core.o
 
 obj-$(CONFIG_PMIC_DA903X)      += da903x.o
 
+obj-$(CONFIG_PMIC_DA9052)      += da9052-irq.o
 obj-$(CONFIG_PMIC_DA9052)      += da9052-core.o
 obj-$(CONFIG_MFD_DA9052_SPI)   += da9052-spi.o
 obj-$(CONFIG_MFD_DA9052_I2C)   += da9052-i2c.o
@@ -137,8 +140,11 @@ obj-$(CONFIG_MFD_TPS65090) += tps65090.o
 obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
 obj-$(CONFIG_MFD_INTEL_MSIC)   += intel_msic.o
 obj-$(CONFIG_MFD_PALMAS)       += palmas.o
+obj-$(CONFIG_MFD_VIPERBOARD)   += viperboard.o
 obj-$(CONFIG_MFD_RC5T583)      += rc5t583.o rc5t583-irq.o
 obj-$(CONFIG_MFD_SEC_CORE)     += sec-core.o sec-irq.o
 obj-$(CONFIG_MFD_SYSCON)       += syscon.o
 obj-$(CONFIG_MFD_LM3533)       += lm3533-core.o lm3533-ctrlbank.o
 obj-$(CONFIG_VEXPRESS_CONFIG)  += vexpress-config.o vexpress-sysreg.o
+obj-$(CONFIG_MFD_RETU)         += retu-mfd.o
+obj-$(CONFIG_MFD_AS3711)       += as3711.o
index 59da165..e1650ba 100644 (file)
@@ -586,38 +586,6 @@ int ab8500_suspend(struct ab8500 *ab8500)
                return 0;
 }
 
-/* AB8500 GPIO Resources */
-static struct resource __devinitdata ab8500_gpio_resources[] = {
-       {
-               .name   = "GPIO_INT6",
-               .start  = AB8500_INT_GPIO6R,
-               .end    = AB8500_INT_GPIO41F,
-               .flags  = IORESOURCE_IRQ,
-       }
-};
-
-/* AB9540 GPIO Resources */
-static struct resource __devinitdata ab9540_gpio_resources[] = {
-       {
-               .name   = "GPIO_INT6",
-               .start  = AB8500_INT_GPIO6R,
-               .end    = AB8500_INT_GPIO41F,
-               .flags  = IORESOURCE_IRQ,
-       },
-       {
-               .name   = "GPIO_INT14",
-               .start  = AB9540_INT_GPIO50R,
-               .end    = AB9540_INT_GPIO54R,
-               .flags  = IORESOURCE_IRQ,
-       },
-       {
-               .name   = "GPIO_INT15",
-               .start  = AB9540_INT_GPIO50F,
-               .end    = AB9540_INT_GPIO54F,
-               .flags  = IORESOURCE_IRQ,
-       }
-};
-
 static struct resource ab8500_gpadc_resources[] = {
        {
                .name   = "HW_CONV_END",
@@ -979,6 +947,10 @@ static struct mfd_cell abx500_common_devs[] = {
                .of_compatible = "stericsson,ab8500-regulator",
        },
        {
+               .name = "abx500-clk",
+               .of_compatible = "stericsson,abx500-clk",
+       },
+       {
                .name = "ab8500-gpadc",
                .of_compatible = "stericsson,ab8500-gpadc",
                .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
@@ -1080,8 +1052,6 @@ static struct mfd_cell ab8500_devs[] = {
        {
                .name = "ab8500-gpio",
                .of_compatible = "stericsson,ab8500-gpio",
-               .num_resources = ARRAY_SIZE(ab8500_gpio_resources),
-               .resources = ab8500_gpio_resources,
        },
        {
                .name = "ab8500-usb",
@@ -1098,8 +1068,6 @@ static struct mfd_cell ab8500_devs[] = {
 static struct mfd_cell ab9540_devs[] = {
        {
                .name = "ab8500-gpio",
-               .num_resources = ARRAY_SIZE(ab9540_gpio_resources),
-               .resources = ab9540_gpio_resources,
        },
        {
                .name = "ab9540-usb",
@@ -1284,7 +1252,7 @@ static int ab8500_probe(struct platform_device *pdev)
        int i;
        u8 value;
 
-       ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+       ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL);
        if (!ab8500)
                return -ENOMEM;
 
@@ -1294,10 +1262,8 @@ static int ab8500_probe(struct platform_device *pdev)
        ab8500->dev = &pdev->dev;
 
        resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!resource) {
-               ret = -ENODEV;
-               goto out_free_ab8500;
-       }
+       if (!resource)
+               return -ENODEV;
 
        ab8500->irq = resource->start;
 
@@ -1320,7 +1286,7 @@ static int ab8500_probe(struct platform_device *pdev)
                ret = get_register_interruptible(ab8500, AB8500_MISC,
                        AB8500_IC_NAME_REG, &value);
                if (ret < 0)
-                       goto out_free_ab8500;
+                       return ret;
 
                ab8500->version = value;
        }
@@ -1328,7 +1294,7 @@ static int ab8500_probe(struct platform_device *pdev)
        ret = get_register_interruptible(ab8500, AB8500_MISC,
                AB8500_REV_REG, &value);
        if (ret < 0)
-               goto out_free_ab8500;
+               return ret;
 
        ab8500->chip_id = value;
 
@@ -1345,14 +1311,13 @@ static int ab8500_probe(struct platform_device *pdev)
                ab8500->mask_size = AB8500_NUM_IRQ_REGS;
                ab8500->irq_reg_offset = ab8500_irq_regoffset;
        }
-       ab8500->mask = kzalloc(ab8500->mask_size, GFP_KERNEL);
+       ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
        if (!ab8500->mask)
                return -ENOMEM;
-       ab8500->oldmask = kzalloc(ab8500->mask_size, GFP_KERNEL);
-       if (!ab8500->oldmask) {
-               ret = -ENOMEM;
-               goto out_freemask;
-       }
+       ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+       if (!ab8500->oldmask)
+               return -ENOMEM;
+
        /*
         * ab8500 has switched off due to (SWITCH_OFF_STATUS):
         * 0x01 Swoff bit programming
@@ -1406,37 +1371,37 @@ static int ab8500_probe(struct platform_device *pdev)
 
        ret = abx500_register_ops(ab8500->dev, &ab8500_ops);
        if (ret)
-               goto out_freeoldmask;
+               return ret;
 
        for (i = 0; i < ab8500->mask_size; i++)
                ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
 
        ret = ab8500_irq_init(ab8500, np);
        if (ret)
-               goto out_freeoldmask;
+               return ret;
 
        /*  Activate this feature only in ab9540 */
        /*  till tests are done on ab8500 1p2 or later*/
        if (is_ab9540(ab8500)) {
-               ret = request_threaded_irq(ab8500->irq, NULL,
-                                       ab8500_hierarchical_irq,
-                                       IRQF_ONESHOT | IRQF_NO_SUSPEND,
-                                       "ab8500", ab8500);
+               ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+                                               ab8500_hierarchical_irq,
+                                               IRQF_ONESHOT | IRQF_NO_SUSPEND,
+                                               "ab8500", ab8500);
        }
        else {
-               ret = request_threaded_irq(ab8500->irq, NULL,
-                                       ab8500_irq,
-                                       IRQF_ONESHOT | IRQF_NO_SUSPEND,
-                                       "ab8500", ab8500);
+               ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+                                               ab8500_irq,
+                                               IRQF_ONESHOT | IRQF_NO_SUSPEND,
+                                               "ab8500", ab8500);
                if (ret)
-                       goto out_freeoldmask;
+                       return ret;
        }
 
        ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
                        ARRAY_SIZE(abx500_common_devs), NULL,
                        ab8500->irq_base, ab8500->domain);
        if (ret)
-               goto out_freeirq;
+               return ret;
 
        if (is_ab9540(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
@@ -1447,14 +1412,14 @@ static int ab8500_probe(struct platform_device *pdev)
                                ARRAY_SIZE(ab8500_devs), NULL,
                                ab8500->irq_base, ab8500->domain);
        if (ret)
-               goto out_freeirq;
+               return ret;
 
        if (is_ab9540(ab8500) || is_ab8505(ab8500))
                ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
                                ARRAY_SIZE(ab9540_ab8505_devs), NULL,
                                ab8500->irq_base, ab8500->domain);
        if (ret)
-               goto out_freeirq;
+               return ret;
 
        if (!no_bm) {
                /* Add battery management devices */
@@ -1475,17 +1440,6 @@ static int ab8500_probe(struct platform_device *pdev)
                dev_err(ab8500->dev, "error creating sysfs entries\n");
 
        return ret;
-
-out_freeirq:
-       free_irq(ab8500->irq, ab8500);
-out_freeoldmask:
-       kfree(ab8500->oldmask);
-out_freemask:
-       kfree(ab8500->mask);
-out_free_ab8500:
-       kfree(ab8500);
-
-       return ret;
 }
 
 static int ab8500_remove(struct platform_device *pdev)
@@ -1498,11 +1452,6 @@ static int ab8500_remove(struct platform_device *pdev)
                sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
 
        mfd_remove_devices(ab8500->dev);
-       free_irq(ab8500->irq, ab8500);
-
-       kfree(ab8500->oldmask);
-       kfree(ab8500->mask);
-       kfree(ab8500);
 
        return 0;
 }
index c784f46..bc8a3ed 100644 (file)
@@ -292,6 +292,7 @@ int arizona_dev_init(struct arizona *arizona)
        struct device *dev = arizona->dev;
        const char *type_name;
        unsigned int reg, val;
+       int (*apply_patch)(struct arizona *) = NULL;
        int ret, i;
 
        dev_set_drvdata(arizona->dev, arizona);
@@ -391,7 +392,7 @@ int arizona_dev_init(struct arizona *arizona)
                                arizona->type);
                        arizona->type = WM5102;
                }
-               ret = wm5102_patch(arizona);
+               apply_patch = wm5102_patch;
                break;
 #endif
 #ifdef CONFIG_MFD_WM5110
@@ -402,7 +403,7 @@ int arizona_dev_init(struct arizona *arizona)
                                arizona->type);
                        arizona->type = WM5110;
                }
-               ret = wm5110_patch(arizona);
+               apply_patch = wm5110_patch;
                break;
 #endif
        default:
@@ -412,9 +413,6 @@ int arizona_dev_init(struct arizona *arizona)
 
        dev_info(dev, "%s revision %c\n", type_name, arizona->rev + 'A');
 
-       if (ret != 0)
-               dev_err(arizona->dev, "Failed to apply patch: %d\n", ret);
-
        /* If we have a /RESET GPIO we'll already be reset */
        if (!arizona->pdata.reset) {
                regcache_mark_dirty(arizona->regmap);
@@ -438,6 +436,15 @@ int arizona_dev_init(struct arizona *arizona)
                goto err_reset;
        }
 
+       if (apply_patch) {
+               ret = apply_patch(arizona);
+               if (ret != 0) {
+                       dev_err(arizona->dev, "Failed to apply patch: %d\n",
+                               ret);
+                       goto err_reset;
+               }
+       }
+
        for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
                if (!arizona->pdata.gpio_defaults[i])
                        continue;
index b1b0091..74713bf 100644 (file)
@@ -224,6 +224,7 @@ int arizona_irq_init(struct arizona *arizona)
        arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
                                              arizona);
        if (!arizona->virq) {
+               dev_err(arizona->dev, "Failed to add core IRQ domain\n");
                ret = -EINVAL;
                goto err;
        }
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
new file mode 100644 (file)
index 0000000..e994c96
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * AS3711 PMIC MFD driver
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/as3711.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+       AS3711_REGULATOR,
+       AS3711_BACKLIGHT,
+};
+
+/*
+ * Ok to have it static: it is only used during probing and multiple I2C devices
+ * cannot be probed simultaneously. Just make sure to avoid stale data.
+ */
+static struct mfd_cell as3711_subdevs[] = {
+       [AS3711_REGULATOR] = {.name = "as3711-regulator",},
+       [AS3711_BACKLIGHT] = {.name = "as3711-backlight",},
+};
+
+static bool as3711_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AS3711_GPIO_SIGNAL_IN:
+       case AS3711_INTERRUPT_STATUS_1:
+       case AS3711_INTERRUPT_STATUS_2:
+       case AS3711_INTERRUPT_STATUS_3:
+       case AS3711_CHARGER_STATUS_1:
+       case AS3711_CHARGER_STATUS_2:
+       case AS3711_REG_STATUS:
+               return true;
+       }
+       return false;
+}
+
+static bool as3711_precious_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AS3711_INTERRUPT_STATUS_1:
+       case AS3711_INTERRUPT_STATUS_2:
+       case AS3711_INTERRUPT_STATUS_3:
+               return true;
+       }
+       return false;
+}
+
+static bool as3711_readable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AS3711_SD_1_VOLTAGE:
+       case AS3711_SD_2_VOLTAGE:
+       case AS3711_SD_3_VOLTAGE:
+       case AS3711_SD_4_VOLTAGE:
+       case AS3711_LDO_1_VOLTAGE:
+       case AS3711_LDO_2_VOLTAGE:
+       case AS3711_LDO_3_VOLTAGE:
+       case AS3711_LDO_4_VOLTAGE:
+       case AS3711_LDO_5_VOLTAGE:
+       case AS3711_LDO_6_VOLTAGE:
+       case AS3711_LDO_7_VOLTAGE:
+       case AS3711_LDO_8_VOLTAGE:
+       case AS3711_SD_CONTROL:
+       case AS3711_GPIO_SIGNAL_OUT:
+       case AS3711_GPIO_SIGNAL_IN:
+       case AS3711_SD_CONTROL_1:
+       case AS3711_SD_CONTROL_2:
+       case AS3711_CURR_CONTROL:
+       case AS3711_CURR1_VALUE:
+       case AS3711_CURR2_VALUE:
+       case AS3711_CURR3_VALUE:
+       case AS3711_STEPUP_CONTROL_1:
+       case AS3711_STEPUP_CONTROL_2:
+       case AS3711_STEPUP_CONTROL_4:
+       case AS3711_STEPUP_CONTROL_5:
+       case AS3711_REG_STATUS:
+       case AS3711_INTERRUPT_STATUS_1:
+       case AS3711_INTERRUPT_STATUS_2:
+       case AS3711_INTERRUPT_STATUS_3:
+       case AS3711_CHARGER_STATUS_1:
+       case AS3711_CHARGER_STATUS_2:
+       case AS3711_ASIC_ID_1:
+       case AS3711_ASIC_ID_2:
+               return true;
+       }
+       return false;
+}
+
+static const struct regmap_config as3711_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .volatile_reg = as3711_volatile_reg,
+       .readable_reg = as3711_readable_reg,
+       .precious_reg = as3711_precious_reg,
+       .max_register = AS3711_MAX_REGS,
+       .num_reg_defaults_raw = AS3711_MAX_REGS,
+       .cache_type = REGCACHE_RBTREE,
+};
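
A minimal sketch of what the config above buys a caller, assuming only the register names from the switch statements; reads of non-volatile registers may be served from the RBTREE regcache, while registers listed in as3711_volatile_reg() always go to the bus:

#include <linux/regmap.h>
#include <linux/mfd/as3711.h>

/* Sketch only: demonstrates the cache split set up by the config above. */
static int as3711_read_sketch(struct as3711 *as3711)
{
        unsigned int val;
        int ret;

        /* Not volatile: may be served from the regcache once populated. */
        ret = regmap_read(as3711->regmap, AS3711_SD_1_VOLTAGE, &val);
        if (ret)
                return ret;

        /* Volatile per as3711_volatile_reg(): always read from hardware. */
        return regmap_read(as3711->regmap, AS3711_REG_STATUS, &val);
}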
+
+static int as3711_i2c_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct as3711 *as3711;
+       struct as3711_platform_data *pdata = client->dev.platform_data;
+       unsigned int id1, id2;
+       int ret;
+
+       if (!pdata)
+               dev_dbg(&client->dev, "Platform data not found\n");
+
+       as3711 = devm_kzalloc(&client->dev, sizeof(struct as3711), GFP_KERNEL);
+       if (!as3711) {
+               dev_err(&client->dev, "Memory allocation failed\n");
+               return -ENOMEM;
+       }
+
+       as3711->dev = &client->dev;
+       i2c_set_clientdata(client, as3711);
+
+       if (client->irq)
+               dev_notice(&client->dev, "IRQ not supported yet\n");
+
+       as3711->regmap = devm_regmap_init_i2c(client, &as3711_regmap_config);
+       if (IS_ERR(as3711->regmap)) {
+               ret = PTR_ERR(as3711->regmap);
+               dev_err(&client->dev, "regmap initialization failed: %d\n", ret);
+               return ret;
+       }
+
+       ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_1, &id1);
+       if (!ret)
+               ret = regmap_read(as3711->regmap, AS3711_ASIC_ID_2, &id2);
+       if (ret < 0) {
+               dev_err(&client->dev, "regmap_read() failed: %d\n", ret);
+               return ret;
+       }
+       if (id1 != 0x8b)
+               return -ENODEV;
+       dev_info(as3711->dev, "AS3711 detected: %x:%x\n", id1, id2);
+
+       /* We can reuse as3711_subdevs[]; it will be copied in mfd_add_devices() */
+       if (pdata) {
+               as3711_subdevs[AS3711_REGULATOR].platform_data = &pdata->regulator;
+               as3711_subdevs[AS3711_REGULATOR].pdata_size = sizeof(pdata->regulator);
+               as3711_subdevs[AS3711_BACKLIGHT].platform_data = &pdata->backlight;
+               as3711_subdevs[AS3711_BACKLIGHT].pdata_size = sizeof(pdata->backlight);
+       } else {
+               as3711_subdevs[AS3711_REGULATOR].platform_data = NULL;
+               as3711_subdevs[AS3711_REGULATOR].pdata_size = 0;
+               as3711_subdevs[AS3711_BACKLIGHT].platform_data = NULL;
+               as3711_subdevs[AS3711_BACKLIGHT].pdata_size = 0;
+       }
+
+       ret = mfd_add_devices(as3711->dev, -1, as3711_subdevs,
+                             ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
+       if (ret < 0)
+               dev_err(&client->dev, "add mfd devices failed: %d\n", ret);
+
+       return ret;
+}
+
+static int as3711_i2c_remove(struct i2c_client *client)
+{
+       struct as3711 *as3711 = i2c_get_clientdata(client);
+
+       mfd_remove_devices(as3711->dev);
+       return 0;
+}
+
+static const struct i2c_device_id as3711_i2c_id[] = {
+       {.name = "as3711", .driver_data = 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, as3711_i2c_id);
+
+static struct i2c_driver as3711_i2c_driver = {
+       .driver = {
+                  .name = "as3711",
+                  .owner = THIS_MODULE,
+                  },
+       .probe = as3711_i2c_probe,
+       .remove = as3711_i2c_remove,
+       .id_table = as3711_i2c_id,
+};
+
+static int __init as3711_i2c_init(void)
+{
+       return i2c_add_driver(&as3711_i2c_driver);
+}
+/* Initialise early */
+subsys_initcall(as3711_i2c_init);
+
+static void __exit as3711_i2c_exit(void)
+{
+       i2c_del_driver(&as3711_i2c_driver);
+}
+module_exit(as3711_i2c_exit);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("AS3711 PMIC driver");
+MODULE_LICENSE("GPL v2");
index 689b747..a3c9613 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
-#include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mfd/da9052/pdata.h>
 #include <linux/mfd/da9052/reg.h>
 
-#define DA9052_NUM_IRQ_REGS            4
-#define DA9052_IRQ_MASK_POS_1          0x01
-#define DA9052_IRQ_MASK_POS_2          0x02
-#define DA9052_IRQ_MASK_POS_3          0x04
-#define DA9052_IRQ_MASK_POS_4          0x08
-#define DA9052_IRQ_MASK_POS_5          0x10
-#define DA9052_IRQ_MASK_POS_6          0x20
-#define DA9052_IRQ_MASK_POS_7          0x40
-#define DA9052_IRQ_MASK_POS_8          0x80
-
 static bool da9052_reg_readable(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -425,15 +414,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
 
-static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
-{
-       struct da9052 *da9052 = irq_data;
-
-       complete(&da9052->done);
-
-       return IRQ_HANDLED;
-}
-
 int da9052_adc_read_temp(struct da9052 *da9052)
 {
        int tbat;
@@ -447,74 +427,6 @@ int da9052_adc_read_temp(struct da9052 *da9052)
 }
 EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
 
-static struct resource da9052_rtc_resource = {
-       .name = "ALM",
-       .start = DA9052_IRQ_ALARM,
-       .end   = DA9052_IRQ_ALARM,
-       .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_onkey_resource = {
-       .name = "ONKEY",
-       .start = DA9052_IRQ_NONKEY,
-       .end   = DA9052_IRQ_NONKEY,
-       .flags = IORESOURCE_IRQ,
-};
-
-static struct resource da9052_bat_resources[] = {
-       {
-               .name = "BATT TEMP",
-               .start = DA9052_IRQ_TBAT,
-               .end   = DA9052_IRQ_TBAT,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "DCIN DET",
-               .start = DA9052_IRQ_DCIN,
-               .end   = DA9052_IRQ_DCIN,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "DCIN REM",
-               .start = DA9052_IRQ_DCINREM,
-               .end   = DA9052_IRQ_DCINREM,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "VBUS DET",
-               .start = DA9052_IRQ_VBUS,
-               .end   = DA9052_IRQ_VBUS,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "VBUS REM",
-               .start = DA9052_IRQ_VBUSREM,
-               .end   = DA9052_IRQ_VBUSREM,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "CHG END",
-               .start = DA9052_IRQ_CHGEND,
-               .end   = DA9052_IRQ_CHGEND,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
-static struct resource da9052_tsi_resources[] = {
-       {
-               .name = "PENDWN",
-               .start = DA9052_IRQ_PENDOWN,
-               .end   = DA9052_IRQ_PENDOWN,
-               .flags = IORESOURCE_IRQ,
-       },
-       {
-               .name = "TSIRDY",
-               .start = DA9052_IRQ_TSIREADY,
-               .end   = DA9052_IRQ_TSIREADY,
-               .flags = IORESOURCE_IRQ,
-       },
-};
-
 static struct mfd_cell da9052_subdev_info[] = {
        {
                .name = "da9052-regulator",
@@ -574,13 +486,9 @@ static struct mfd_cell da9052_subdev_info[] = {
        },
        {
                .name = "da9052-onkey",
-               .resources = &da9052_onkey_resource,
-               .num_resources = 1,
        },
        {
                .name = "da9052-rtc",
-               .resources = &da9052_rtc_resource,
-               .num_resources = 1,
        },
        {
                .name = "da9052-gpio",
@@ -602,160 +510,15 @@ static struct mfd_cell da9052_subdev_info[] = {
        },
        {
                .name = "da9052-tsi",
-               .resources = da9052_tsi_resources,
-               .num_resources = ARRAY_SIZE(da9052_tsi_resources),
        },
        {
                .name = "da9052-bat",
-               .resources = da9052_bat_resources,
-               .num_resources = ARRAY_SIZE(da9052_bat_resources),
        },
        {
                .name = "da9052-watchdog",
        },
 };
 
-static struct regmap_irq da9052_irqs[] = {
-       [DA9052_IRQ_DCIN] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_1,
-       },
-       [DA9052_IRQ_VBUS] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_2,
-       },
-       [DA9052_IRQ_DCINREM] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_3,
-       },
-       [DA9052_IRQ_VBUSREM] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_4,
-       },
-       [DA9052_IRQ_VDDLOW] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_5,
-       },
-       [DA9052_IRQ_ALARM] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_6,
-       },
-       [DA9052_IRQ_SEQRDY] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_7,
-       },
-       [DA9052_IRQ_COMP1V2] = {
-               .reg_offset = 0,
-               .mask = DA9052_IRQ_MASK_POS_8,
-       },
-       [DA9052_IRQ_NONKEY] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_1,
-       },
-       [DA9052_IRQ_IDFLOAT] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_2,
-       },
-       [DA9052_IRQ_IDGND] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_3,
-       },
-       [DA9052_IRQ_CHGEND] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_4,
-       },
-       [DA9052_IRQ_TBAT] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_5,
-       },
-       [DA9052_IRQ_ADC_EOM] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_6,
-       },
-       [DA9052_IRQ_PENDOWN] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_7,
-       },
-       [DA9052_IRQ_TSIREADY] = {
-               .reg_offset = 1,
-               .mask = DA9052_IRQ_MASK_POS_8,
-       },
-       [DA9052_IRQ_GPI0] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_1,
-       },
-       [DA9052_IRQ_GPI1] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_2,
-       },
-       [DA9052_IRQ_GPI2] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_3,
-       },
-       [DA9052_IRQ_GPI3] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_4,
-       },
-       [DA9052_IRQ_GPI4] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_5,
-       },
-       [DA9052_IRQ_GPI5] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_6,
-       },
-       [DA9052_IRQ_GPI6] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_7,
-       },
-       [DA9052_IRQ_GPI7] = {
-               .reg_offset = 2,
-               .mask = DA9052_IRQ_MASK_POS_8,
-       },
-       [DA9052_IRQ_GPI8] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_1,
-       },
-       [DA9052_IRQ_GPI9] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_2,
-       },
-       [DA9052_IRQ_GPI10] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_3,
-       },
-       [DA9052_IRQ_GPI11] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_4,
-       },
-       [DA9052_IRQ_GPI12] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_5,
-       },
-       [DA9052_IRQ_GPI13] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_6,
-       },
-       [DA9052_IRQ_GPI14] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_7,
-       },
-       [DA9052_IRQ_GPI15] = {
-               .reg_offset = 3,
-               .mask = DA9052_IRQ_MASK_POS_8,
-       },
-};
-
-static struct regmap_irq_chip da9052_regmap_irq_chip = {
-       .name = "da9052_irq",
-       .status_base = DA9052_EVENT_A_REG,
-       .mask_base = DA9052_IRQ_MASK_A_REG,
-       .ack_base = DA9052_EVENT_A_REG,
-       .num_regs = DA9052_NUM_IRQ_REGS,
-       .irqs = da9052_irqs,
-       .num_irqs = ARRAY_SIZE(da9052_irqs),
-};
-
 struct regmap_config da9052_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -782,45 +545,31 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
 
        da9052->chip_id = chip_id;
 
-       if (!pdata || !pdata->irq_base)
-               da9052->irq_base = -1;
-       else
-               da9052->irq_base = pdata->irq_base;
-
-       ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
-                                 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                 da9052->irq_base, &da9052_regmap_irq_chip,
-                                 &da9052->irq_data);
-       if (ret < 0)
-               goto regmap_err;
-
-       da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
-
-       ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
-                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                  "adc irq", da9052);
-       if (ret != 0)
-               dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+       ret = da9052_irq_init(da9052);
+       if (ret != 0) {
+               dev_err(da9052->dev, "da9052_irq_init failed: %d\n", ret);
+               return ret;
+       }
 
        ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
                              ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
-       if (ret)
+       if (ret) {
+               dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
                goto err;
+       }
 
        return 0;
 
 err:
-       free_irq(DA9052_IRQ_ADC_EOM, da9052);
-       mfd_remove_devices(da9052->dev);
-regmap_err:
+       da9052_irq_exit(da9052);
+
        return ret;
 }
 
 void da9052_device_exit(struct da9052 *da9052)
 {
-       free_irq(DA9052_IRQ_ADC_EOM, da9052);
-       regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
        mfd_remove_devices(da9052->dev);
+       da9052_irq_exit(da9052);
 }
 
 MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
diff --git a/drivers/mfd/da9052-irq.c b/drivers/mfd/da9052-irq.c
new file mode 100644 (file)
index 0000000..57ae784
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * DA9052 interrupt support
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ * Based on arizona-irq.c, which is:
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS            4
+#define DA9052_IRQ_MASK_POS_1          0x01
+#define DA9052_IRQ_MASK_POS_2          0x02
+#define DA9052_IRQ_MASK_POS_3          0x04
+#define DA9052_IRQ_MASK_POS_4          0x08
+#define DA9052_IRQ_MASK_POS_5          0x10
+#define DA9052_IRQ_MASK_POS_6          0x20
+#define DA9052_IRQ_MASK_POS_7          0x40
+#define DA9052_IRQ_MASK_POS_8          0x80
+
+static struct regmap_irq da9052_irqs[] = {
+       [DA9052_IRQ_DCIN] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_1,
+       },
+       [DA9052_IRQ_VBUS] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_2,
+       },
+       [DA9052_IRQ_DCINREM] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_3,
+       },
+       [DA9052_IRQ_VBUSREM] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_4,
+       },
+       [DA9052_IRQ_VDDLOW] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_5,
+       },
+       [DA9052_IRQ_ALARM] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_6,
+       },
+       [DA9052_IRQ_SEQRDY] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_7,
+       },
+       [DA9052_IRQ_COMP1V2] = {
+               .reg_offset = 0,
+               .mask = DA9052_IRQ_MASK_POS_8,
+       },
+       [DA9052_IRQ_NONKEY] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_1,
+       },
+       [DA9052_IRQ_IDFLOAT] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_2,
+       },
+       [DA9052_IRQ_IDGND] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_3,
+       },
+       [DA9052_IRQ_CHGEND] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_4,
+       },
+       [DA9052_IRQ_TBAT] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_5,
+       },
+       [DA9052_IRQ_ADC_EOM] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_6,
+       },
+       [DA9052_IRQ_PENDOWN] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_7,
+       },
+       [DA9052_IRQ_TSIREADY] = {
+               .reg_offset = 1,
+               .mask = DA9052_IRQ_MASK_POS_8,
+       },
+       [DA9052_IRQ_GPI0] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_1,
+       },
+       [DA9052_IRQ_GPI1] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_2,
+       },
+       [DA9052_IRQ_GPI2] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_3,
+       },
+       [DA9052_IRQ_GPI3] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_4,
+       },
+       [DA9052_IRQ_GPI4] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_5,
+       },
+       [DA9052_IRQ_GPI5] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_6,
+       },
+       [DA9052_IRQ_GPI6] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_7,
+       },
+       [DA9052_IRQ_GPI7] = {
+               .reg_offset = 2,
+               .mask = DA9052_IRQ_MASK_POS_8,
+       },
+       [DA9052_IRQ_GPI8] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_1,
+       },
+       [DA9052_IRQ_GPI9] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_2,
+       },
+       [DA9052_IRQ_GPI10] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_3,
+       },
+       [DA9052_IRQ_GPI11] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_4,
+       },
+       [DA9052_IRQ_GPI12] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_5,
+       },
+       [DA9052_IRQ_GPI13] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_6,
+       },
+       [DA9052_IRQ_GPI14] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_7,
+       },
+       [DA9052_IRQ_GPI15] = {
+               .reg_offset = 3,
+               .mask = DA9052_IRQ_MASK_POS_8,
+       },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+       .name = "da9052_irq",
+       .status_base = DA9052_EVENT_A_REG,
+       .mask_base = DA9052_IRQ_MASK_A_REG,
+       .ack_base = DA9052_EVENT_A_REG,
+       .num_regs = DA9052_NUM_IRQ_REGS,
+       .irqs = da9052_irqs,
+       .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+static int da9052_map_irq(struct da9052 *da9052, int irq)
+{
+       return regmap_irq_get_virq(da9052->irq_data, irq);
+}
+
+int da9052_enable_irq(struct da9052 *da9052, int irq)
+{
+       irq = da9052_map_irq(da9052, irq);
+       if (irq < 0)
+               return irq;
+
+       enable_irq(irq);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_enable_irq);
+
+int da9052_disable_irq(struct da9052 *da9052, int irq)
+{
+       irq = da9052_map_irq(da9052, irq);
+       if (irq < 0)
+               return irq;
+
+       disable_irq(irq);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq);
+
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq)
+{
+       irq = da9052_map_irq(da9052, irq);
+       if (irq < 0)
+               return irq;
+
+       disable_irq_nosync(irq);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(da9052_disable_irq_nosync);
+
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+                          irq_handler_t handler, void *data)
+{
+       irq = da9052_map_irq(da9052, irq);
+       if (irq < 0)
+               return irq;
+
+       return request_threaded_irq(irq, NULL, handler,
+                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                    name, data);
+}
+EXPORT_SYMBOL_GPL(da9052_request_irq);
+
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data)
+{
+       irq = da9052_map_irq(da9052, irq);
+       if (irq < 0)
+               return;
+
+       free_irq(irq, data);
+}
+EXPORT_SYMBOL_GPL(da9052_free_irq);
+
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+       struct da9052 *da9052 = irq_data;
+
+       complete(&da9052->done);
+
+       return IRQ_HANDLED;
+}
+
+int da9052_irq_init(struct da9052 *da9052)
+{
+       int ret;
+
+       ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+                                 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                 -1, &da9052_regmap_irq_chip,
+                                 &da9052->irq_data);
+       if (ret < 0) {
+               dev_err(da9052->dev, "regmap_add_irq_chip failed: %d\n", ret);
+               goto regmap_err;
+       }
+
+       ret = da9052_request_irq(da9052, DA9052_IRQ_ADC_EOM, "adc-irq",
+                                da9052_auxadc_irq, da9052);
+       if (ret != 0) {
+               dev_err(da9052->dev, "DA9052_IRQ_ADC_EOM failed: %d\n", ret);
+               goto request_irq_err;
+       }
+
+       return 0;
+
+request_irq_err:
+       regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+regmap_err:
+       return ret;
+}
+
+int da9052_irq_exit(struct da9052 *da9052)
+{
+       da9052_free_irq(da9052, DA9052_IRQ_ADC_EOM, da9052);
+       regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
+
+       return 0;
+}
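
A hedged sketch of how a DA9052 sub-driver might consume the helpers added above; my_pen_irq and my_probe_sketch are hypothetical names, not part of this patch, and the prototypes are assumed to be exported via da9052.h:

#include <linux/interrupt.h>
#include <linux/mfd/da9052/da9052.h>

static irqreturn_t my_pen_irq(int irq, void *data)
{
        /* data is the struct da9052 passed to da9052_request_irq() */
        return IRQ_HANDLED;
}

static int my_probe_sketch(struct da9052 *da9052)
{
        int ret;

        /*
         * The chip-local IRQ number is translated to a Linux virq
         * inside da9052_request_irq() via regmap_irq_get_virq().
         */
        ret = da9052_request_irq(da9052, DA9052_IRQ_PENDOWN, "pendown",
                                 my_pen_irq, da9052);
        if (ret)
                return ret;

        /* ... and on teardown: */
        da9052_free_irq(da9052, DA9052_IRQ_PENDOWN, da9052);
        return 0;
}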
index 2971056..dc8826d 100644 (file)
@@ -2763,7 +2763,7 @@ static int db8500_irq_init(struct device_node *np)
 
 void __init db8500_prcmu_early_init(void)
 {
-       if (cpu_is_u8500v2()) {
+       if (cpu_is_u8500v2() || cpu_is_u9540()) {
                void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
 
                if (tcpm_base != NULL) {
@@ -2781,7 +2781,11 @@ void __init db8500_prcmu_early_init(void)
                        iounmap(tcpm_base);
                }
 
-               tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
+               if (cpu_is_u9540())
+                       tcdm_base = ioremap_nocache(U8500_PRCMU_TCDM_BASE,
+                                               SZ_4K + SZ_8K) + SZ_8K;
+               else
+                       tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
        } else {
                pr_err("prcmu: Unsupported chip version\n");
                BUG();
index 0b8b55b..e80587f 100644 (file)
@@ -211,7 +211,7 @@ static int jz4740_adc_probe(struct platform_device *pdev)
        int ret;
        int irq_base;
 
-       adc = kmalloc(sizeof(*adc), GFP_KERNEL);
+       adc = devm_kzalloc(&pdev->dev, sizeof(*adc), GFP_KERNEL);
        if (!adc) {
                dev_err(&pdev->dev, "Failed to allocate driver structure\n");
                return -ENOMEM;
@@ -221,30 +221,27 @@ static int jz4740_adc_probe(struct platform_device *pdev)
        if (adc->irq < 0) {
                ret = adc->irq;
                dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
-               goto err_free;
+               return ret;
        }
 
        irq_base = platform_get_irq(pdev, 1);
        if (irq_base < 0) {
-               ret = irq_base;
-               dev_err(&pdev->dev, "Failed to get irq base: %d\n", ret);
-               goto err_free;
+               dev_err(&pdev->dev, "Failed to get irq base: %d\n", irq_base);
+               return irq_base;
        }
 
        mem_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem_base) {
-               ret = -ENOENT;
                dev_err(&pdev->dev, "Failed to get platform mmio resource\n");
-               goto err_free;
+               return -ENOENT;
        }
 
        /* Only request the shared registers for the MFD driver */
        adc->mem = request_mem_region(mem_base->start, JZ_REG_ADC_STATUS,
                                        pdev->name);
        if (!adc->mem) {
-               ret = -EBUSY;
                dev_err(&pdev->dev, "Failed to request mmio memory region\n");
-               goto err_free;
+               return -EBUSY;
        }
 
        adc->base = ioremap_nocache(adc->mem->start, resource_size(adc->mem));
@@ -301,9 +298,6 @@ err_iounmap:
        iounmap(adc->base);
 err_release_mem_region:
        release_mem_region(adc->mem->start, resource_size(adc->mem));
-err_free:
-       kfree(adc);
-
        return ret;
 }
 
@@ -325,8 +319,6 @@ static int jz4740_adc_remove(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, NULL);
 
-       kfree(adc);
-
        return 0;
 }
 
index 2ad24ca..d9d9303 100644 (file)
@@ -734,7 +734,7 @@ static int lpc_ich_init_gpio(struct pci_dev *dev,
        pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
-               dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+               dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
                lpc_ich_cells[LPC_GPIO].num_resources--;
                goto gpe0_done;
        }
@@ -760,7 +760,7 @@ gpe0_done:
        pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
-               dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+               dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
                ret = -ENODEV;
                goto gpio_done;
        }
@@ -810,7 +810,7 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
        pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
-               dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+               dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
                ret = -ENODEV;
                goto wdt_done;
        }
@@ -830,12 +830,15 @@ static int lpc_ich_init_wdt(struct pci_dev *dev,
         * we have to read RCBA from PCI Config space 0xf0 and use
         * it as base. GCS = RCBA + ICH6_GCS(0x3410).
         */
-       if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+       if (lpc_chipset_info[id->driver_data].iTCO_version == 1) {
+               /* Don't register iomem for TCO ver 1 */
+               lpc_ich_cells[LPC_WDT].num_resources--;
+       } else {
                pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
                base_addr = base_addr_cfg & 0xffffc000;
                if (!(base_addr_cfg & 1)) {
-                       pr_err("RCBA is disabled by hardware/BIOS, "
-                                       "device disabled\n");
+                       dev_notice(&dev->dev, "RCBA is disabled by "
+                                       "hardware/BIOS, device disabled\n");
                        ret = -ENODEV;
                        goto wdt_done;
                }
@@ -871,6 +874,7 @@ static int lpc_ich_probe(struct pci_dev *dev,
         * successfully.
         */
        if (!cell_added) {
+               dev_warn(&dev->dev, "No MFD cells added\n");
                lpc_ich_restore_config_space(dev);
                return -ENODEV;
        }
index 1aba023..2a9b100 100644 (file)
 #define MC13XXX_REVISION_FAB           (0x03 << 11)
 #define MC13XXX_REVISION_ICIDCODE      (0x3f << 13)
 
+#define MC34708_REVISION_REVMETAL      (0x07 <<  0)
+#define MC34708_REVISION_REVFULL       (0x07 <<  3)
+#define MC34708_REVISION_FIN           (0x07 <<  6)
+#define MC34708_REVISION_FAB           (0x07 <<  9)
+
 #define MC13XXX_ADC1           44
 #define MC13XXX_ADC1_ADEN              (1 << 0)
 #define MC13XXX_ADC1_RAND              (1 << 1)
@@ -410,62 +415,52 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
        return IRQ_RETVAL(handled);
 }
 
-static const char *mc13xxx_chipname[] = {
-       [MC13XXX_ID_MC13783] = "mc13783",
-       [MC13XXX_ID_MC13892] = "mc13892",
-};
-
 #define maskval(reg, mask)     (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx)
+static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
 {
-       u32 icid;
-       u32 revision;
-       int ret;
-
-       /*
-        * Get the generation ID from register 46, as apparently some older
-        * IC revisions only have this info at this location. Newer ICs seem to
-        * have both.
-        */
-       ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
-       if (ret)
-               return ret;
+       dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
+                       "fin: %d, fab: %d, icid: %d/%d\n",
+                       mc13xxx->variant->name,
+                       maskval(revision, MC13XXX_REVISION_REVFULL),
+                       maskval(revision, MC13XXX_REVISION_REVMETAL),
+                       maskval(revision, MC13XXX_REVISION_FIN),
+                       maskval(revision, MC13XXX_REVISION_FAB),
+                       maskval(revision, MC13XXX_REVISION_ICID),
+                       maskval(revision, MC13XXX_REVISION_ICIDCODE));
+}
 
-       icid = (icid >> 6) & 0x7;
+static void mc34708_print_revision(struct mc13xxx *mc13xxx, u32 revision)
+{
+       dev_info(mc13xxx->dev, "%s: rev %d.%d, fin: %d, fab: %d\n",
+                       mc13xxx->variant->name,
+                       maskval(revision, MC34708_REVISION_REVFULL),
+                       maskval(revision, MC34708_REVISION_REVMETAL),
+                       maskval(revision, MC34708_REVISION_FIN),
+                       maskval(revision, MC34708_REVISION_FAB));
+}
 
-       switch (icid) {
-       case 2:
-               mc13xxx->ictype = MC13XXX_ID_MC13783;
-               break;
-       case 7:
-               mc13xxx->ictype = MC13XXX_ID_MC13892;
-               break;
-       default:
-               mc13xxx->ictype = MC13XXX_ID_INVALID;
-               break;
-       }
+/* These are only exported for mc13xxx-i2c and mc13xxx-spi */
+struct mc13xxx_variant mc13xxx_variant_mc13783 = {
+       .name = "mc13783",
+       .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13783);
 
-       if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
-                       mc13xxx->ictype == MC13XXX_ID_MC13892) {
-               ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
-
-               dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
-                               "fin: %d, fab: %d, icid: %d/%d\n",
-                               mc13xxx_chipname[mc13xxx->ictype],
-                               maskval(revision, MC13XXX_REVISION_REVFULL),
-                               maskval(revision, MC13XXX_REVISION_REVMETAL),
-                               maskval(revision, MC13XXX_REVISION_FIN),
-                               maskval(revision, MC13XXX_REVISION_FAB),
-                               maskval(revision, MC13XXX_REVISION_ICID),
-                               maskval(revision, MC13XXX_REVISION_ICIDCODE));
-       }
+struct mc13xxx_variant mc13xxx_variant_mc13892 = {
+       .name = "mc13892",
+       .print_revision = mc13xxx_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc13892);
 
-       return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
-}
+struct mc13xxx_variant mc13xxx_variant_mc34708 = {
+       .name = "mc34708",
+       .print_revision = mc34708_print_revision,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_variant_mc34708);
 
 static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
 {
-       return mc13xxx_chipname[mc13xxx->ictype];
+       return mc13xxx->variant->name;
 }
 
 int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -653,13 +648,16 @@ int mc13xxx_common_init(struct mc13xxx *mc13xxx,
                struct mc13xxx_platform_data *pdata, int irq)
 {
        int ret;
+       u32 revision;
 
        mc13xxx_lock(mc13xxx);
 
-       ret = mc13xxx_identify(mc13xxx);
+       ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
        if (ret)
                goto err_revision;
 
+       mc13xxx->variant->print_revision(mc13xxx, revision);
+
        /* mask all irqs */
        ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
        if (ret)
index 7957999..f745e27 100644
 static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
        {
                .name = "mc13892",
-               .driver_data = MC13XXX_ID_MC13892,
+               .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+       }, {
+               .name = "mc34708",
+               .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
        }, {
                /* sentinel */
        }
@@ -34,7 +37,10 @@ MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
 static const struct of_device_id mc13xxx_dt_ids[] = {
        {
                .compatible = "fsl,mc13892",
-               .data = (void *) &mc13xxx_i2c_device_id[0],
+               .data = &mc13xxx_variant_mc13892,
+       }, {
+               .compatible = "fsl,mc34708",
+               .data = &mc13xxx_variant_mc34708,
        }, {
                /* sentinel */
        }
@@ -76,11 +82,15 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
                return ret;
        }
 
-       ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+       if (client->dev.of_node) {
+               const struct of_device_id *of_id =
+                       of_match_device(mc13xxx_dt_ids, &client->dev);
+               mc13xxx->variant = of_id->data;
+       } else {
+               mc13xxx->variant = (void *)id->driver_data;
+       }
 
-       if (ret == 0 && (id->driver_data != mc13xxx->ictype))
-               dev_warn(mc13xxx->dev,
-                               "device id doesn't match auto detection!\n");
+       ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
 
        return ret;
 }
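The probe now selects the variant from the OF match table when a device-tree node is present and from the legacy i2c id otherwise, so the old "doesn't match auto detection" warning has nothing left to compare against. The same lookup, sketched with an explicit NULL check added (the in-tree version relies on the core only binding through the listed compatibles):

	const struct mc13xxx_variant *variant;

	if (client->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mc13xxx_dt_ids, &client->dev);

		variant = of_id ? of_id->data : NULL;
	} else {
		variant = (void *)id->driver_data;
	}
	if (!variant)
		return -ENODEV;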
index cb32f69..3032bae 100644
 static const struct spi_device_id mc13xxx_device_id[] = {
        {
                .name = "mc13783",
-               .driver_data = MC13XXX_ID_MC13783,
+               .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13783,
        }, {
                .name = "mc13892",
-               .driver_data = MC13XXX_ID_MC13892,
+               .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc13892,
+       }, {
+               .name = "mc34708",
+               .driver_data = (kernel_ulong_t)&mc13xxx_variant_mc34708,
        }, {
                /* sentinel */
        }
@@ -39,8 +42,9 @@ static const struct spi_device_id mc13xxx_device_id[] = {
 MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
 
 static const struct of_device_id mc13xxx_dt_ids[] = {
-       { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
-       { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+       { .compatible = "fsl,mc13783", .data = &mc13xxx_variant_mc13783, },
+       { .compatible = "fsl,mc13892", .data = &mc13xxx_variant_mc13892, },
+       { .compatible = "fsl,mc34708", .data = &mc13xxx_variant_mc34708, },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
@@ -144,19 +148,18 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
                return ret;
        }
 
-       ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+       if (spi->dev.of_node) {
+               const struct of_device_id *of_id =
+                       of_match_device(mc13xxx_dt_ids, &spi->dev);
 
-       if (ret) {
-               dev_set_drvdata(&spi->dev, NULL);
+               mc13xxx->variant = of_id->data;
        } else {
-               const struct spi_device_id *devid =
-                       spi_get_device_id(spi);
-               if (!devid || devid->driver_data != mc13xxx->ictype)
-                       dev_warn(mc13xxx->dev,
-                               "device id doesn't match auto detection!\n");
+               const struct spi_device_id *id_entry = spi_get_device_id(spi);
+
+               mc13xxx->variant = (void *)id_entry->driver_data;
        }
 
-       return ret;
+       return mc13xxx_common_init(mc13xxx, pdata, spi->irq);
 }
 
 static int mc13xxx_spi_remove(struct spi_device *spi)
index bbba06f..460ec5c 100644
 #include <linux/regmap.h>
 #include <linux/mfd/mc13xxx.h>
 
-enum mc13xxx_id {
-       MC13XXX_ID_MC13783,
-       MC13XXX_ID_MC13892,
-       MC13XXX_ID_INVALID,
+#define MC13XXX_NUMREGS 0x3f
+
+struct mc13xxx;
+
+struct mc13xxx_variant {
+       const char *name;
+       void (*print_revision)(struct mc13xxx *mc13xxx, u32 revision);
 };
 
-#define MC13XXX_NUMREGS 0x3f
+extern struct mc13xxx_variant
+               mc13xxx_variant_mc13783,
+               mc13xxx_variant_mc13892,
+               mc13xxx_variant_mc34708;
 
 struct mc13xxx {
        struct regmap *regmap;
 
        struct device *dev;
-       enum mc13xxx_id ictype;
+       const struct mc13xxx_variant *variant;
 
        struct mutex lock;
        int irq;
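With the enum gone, supporting a further chip becomes a data-only change: declare a variant with a name and a print_revision callback and reference it from the bus match tables, with no switch statement to extend. A sketch with an invented part number ("mc99999" does not exist):

static void mc99999_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
	dev_info(mc13xxx->dev, "%s: raw revision word 0x%06x\n",
			mc13xxx->variant->name, revision);
}

struct mc13xxx_variant mc13xxx_variant_mc99999 = {
	.name = "mc99999",
	.print_revision = mc99999_print_revision,
};
/* ...then reference &mc13xxx_variant_mc99999 from the i2c/spi and OF
 * match tables, exactly as done for mc34708 above. */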
index f8b7771..7604f4e 100644
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 
+static struct device_type mfd_dev_type = {
+       .name   = "mfd_device",
+};
+
 int mfd_cell_enable(struct platform_device *pdev)
 {
        const struct mfd_cell *cell = mfd_get_cell(pdev);
@@ -91,6 +95,7 @@ static int mfd_add_device(struct device *parent, int id,
                goto fail_device;
 
        pdev->dev.parent = parent;
+       pdev->dev.type = &mfd_dev_type;
 
        if (parent->of_node && cell->of_compatible) {
                for_each_child_of_node(parent->of_node, np) {
@@ -204,10 +209,16 @@ EXPORT_SYMBOL(mfd_add_devices);
 
 static int mfd_remove_devices_fn(struct device *dev, void *c)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       const struct mfd_cell *cell = mfd_get_cell(pdev);
+       struct platform_device *pdev;
+       const struct mfd_cell *cell;
        atomic_t **usage_count = c;
 
+       if (dev->type != &mfd_dev_type)
+               return 0;
+
+       pdev = to_platform_device(dev);
+       cell = mfd_get_cell(pdev);
+
        /* find the base address of usage_count pointers (for freeing) */
        if (!*usage_count || (cell->usage_count < *usage_count))
                *usage_count = cell->usage_count;
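mfd_remove_devices() visits every child of the parent with device_for_each_child(), and a parent may have children that were not created by the MFD core; tagging MFD-created platform devices with a dedicated device_type lets the callback skip foreign children instead of mis-casting them. The guard in isolation (demo names hypothetical):

static int demo_visit_child(struct device *dev, void *data)
{
	if (dev->type != &mfd_dev_type)
		return 0;	/* not an MFD cell: leave it alone */

	platform_device_unregister(to_platform_device(dev));
	return 0;
}

/* device_for_each_child(parent, NULL, demo_visit_child); */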
index fe00cdd..b41db59 100644
@@ -345,7 +345,7 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
        mutex_init(&rc5t583->irq_lock);
 
        /* Initialize all interrupt enable registers to 0 */
-       for (i = 0; i < RC5T583_MAX_INTERRUPT_MASK_REGS; i++)  {
+       for (i = 0; i < RC5T583_MAX_INTERRUPT_EN_REGS; i++)  {
                ret = rc5t583_write(rc5t583->dev, irq_en_add[i],
                                rc5t583->irq_en_reg[i]);
                if (ret < 0)
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
new file mode 100644
index 0000000..7ff4a37
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Retu MFD driver
+ *
+ * Copyright (C) 2004, 2005 Nokia Corporation
+ *
+ * Based on code written by Juha Yrjölä, David Weinehall and Mikko Ylinen.
+ * Rewritten by Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/retu.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+/* Registers */
+#define RETU_REG_ASICR         0x00            /* ASIC ID and revision */
+#define RETU_REG_ASICR_VILMA   (1 << 7)        /* Bit indicating Vilma */
+#define RETU_REG_IDR           0x01            /* Interrupt ID */
+#define RETU_REG_IMR           0x02            /* Interrupt mask */
+
+/* Interrupt sources */
+#define RETU_INT_PWR           0               /* Power button */
+
+struct retu_dev {
+       struct regmap                   *regmap;
+       struct device                   *dev;
+       struct mutex                    mutex;
+       struct regmap_irq_chip_data     *irq_data;
+};
+
+static struct resource retu_pwrbutton_res[] = {
+       {
+               .name   = "retu-pwrbutton",
+               .start  = RETU_INT_PWR,
+               .end    = RETU_INT_PWR,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct mfd_cell retu_devs[] = {
+       {
+               .name           = "retu-wdt"
+       },
+       {
+               .name           = "retu-pwrbutton",
+               .resources      = retu_pwrbutton_res,
+               .num_resources  = ARRAY_SIZE(retu_pwrbutton_res),
+       }
+};
+
+static struct regmap_irq retu_irqs[] = {
+       [RETU_INT_PWR] = {
+               .mask = 1 << RETU_INT_PWR,
+       }
+};
+
+static struct regmap_irq_chip retu_irq_chip = {
+       .name           = "RETU",
+       .irqs           = retu_irqs,
+       .num_irqs       = ARRAY_SIZE(retu_irqs),
+       .num_regs       = 1,
+       .status_base    = RETU_REG_IDR,
+       .mask_base      = RETU_REG_IMR,
+       .ack_base       = RETU_REG_IDR,
+};
+
+/* Retu device registered for the power off. */
+static struct retu_dev *retu_pm_power_off;
+
+int retu_read(struct retu_dev *rdev, u8 reg)
+{
+       int ret;
+       int value;
+
+       mutex_lock(&rdev->mutex);
+       ret = regmap_read(rdev->regmap, reg, &value);
+       mutex_unlock(&rdev->mutex);
+
+       return ret ? ret : value;
+}
+EXPORT_SYMBOL_GPL(retu_read);
+
+int retu_write(struct retu_dev *rdev, u8 reg, u16 data)
+{
+       int ret;
+
+       mutex_lock(&rdev->mutex);
+       ret = regmap_write(rdev->regmap, reg, data);
+       mutex_unlock(&rdev->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(retu_write);
+
+static void retu_power_off(void)
+{
+       struct retu_dev *rdev = retu_pm_power_off;
+       int reg;
+
+       mutex_lock(&retu_pm_power_off->mutex);
+
+       /* Ignore power button state */
+       regmap_read(rdev->regmap, RETU_REG_CC1, &reg);
+       regmap_write(rdev->regmap, RETU_REG_CC1, reg | 2);
+
+       /* Expire watchdog immediately */
+       regmap_write(rdev->regmap, RETU_REG_WATCHDOG, 0);
+
+       /* Wait for poweroff */
+       for (;;)
+               cpu_relax();
+
+       mutex_unlock(&retu_pm_power_off->mutex);
+}
+
+static int retu_regmap_read(void *context, const void *reg, size_t reg_size,
+                           void *val, size_t val_size)
+{
+       int ret;
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+
+       BUG_ON(reg_size != 1 || val_size != 2);
+
+       ret = i2c_smbus_read_word_data(i2c, *(u8 const *)reg);
+       if (ret < 0)
+               return ret;
+
+       *(u16 *)val = ret;
+       return 0;
+}
+
+static int retu_regmap_write(void *context, const void *data, size_t count)
+{
+       u8 reg;
+       u16 val;
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+
+       BUG_ON(count != sizeof(reg) + sizeof(val));
+       memcpy(&reg, data, sizeof(reg));
+       memcpy(&val, data + sizeof(reg), sizeof(val));
+       return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus retu_bus = {
+       .read = retu_regmap_read,
+       .write = retu_regmap_write,
+       .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static struct regmap_config retu_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+};
+
+static int __devinit retu_probe(struct i2c_client *i2c,
+                               const struct i2c_device_id *id)
+{
+       struct retu_dev *rdev;
+       int ret;
+
+       rdev = devm_kzalloc(&i2c->dev, sizeof(*rdev), GFP_KERNEL);
+       if (rdev == NULL)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, rdev);
+       rdev->dev = &i2c->dev;
+       mutex_init(&rdev->mutex);
+       rdev->regmap = devm_regmap_init(&i2c->dev, &retu_bus, &i2c->dev,
+                                       &retu_config);
+       if (IS_ERR(rdev->regmap))
+               return PTR_ERR(rdev->regmap);
+
+       ret = retu_read(rdev, RETU_REG_ASICR);
+       if (ret < 0) {
+               dev_err(rdev->dev, "could not read Retu revision: %d\n", ret);
+               return ret;
+       }
+
+       dev_info(rdev->dev, "Retu%s v%d.%d found\n",
+                (ret & RETU_REG_ASICR_VILMA) ? " & Vilma" : "",
+                (ret >> 4) & 0x7, ret & 0xf);
+
+       /* Mask all RETU interrupts. */
+       ret = retu_write(rdev, RETU_REG_IMR, 0xffff);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_add_irq_chip(rdev->regmap, i2c->irq, IRQF_ONESHOT, -1,
+                                 &retu_irq_chip, &rdev->irq_data);
+       if (ret < 0)
+               return ret;
+
+       ret = mfd_add_devices(rdev->dev, -1, retu_devs, ARRAY_SIZE(retu_devs),
+                             NULL, regmap_irq_chip_get_base(rdev->irq_data),
+                             NULL);
+       if (ret < 0) {
+               regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+               return ret;
+       }
+
+       if (!pm_power_off) {
+               retu_pm_power_off = rdev;
+               pm_power_off      = retu_power_off;
+       }
+
+       return 0;
+}
+
+static int __devexit retu_remove(struct i2c_client *i2c)
+{
+       struct retu_dev *rdev = i2c_get_clientdata(i2c);
+
+       if (retu_pm_power_off == rdev) {
+               pm_power_off      = NULL;
+               retu_pm_power_off = NULL;
+       }
+       mfd_remove_devices(rdev->dev);
+       regmap_del_irq_chip(i2c->irq, rdev->irq_data);
+
+       return 0;
+}
+
+static const struct i2c_device_id retu_id[] = {
+       { "retu-mfd", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, retu_id);
+
+static struct i2c_driver retu_driver = {
+       .driver         = {
+               .name = "retu-mfd",
+               .owner = THIS_MODULE,
+       },
+       .probe          = retu_probe,
+       .remove         = retu_remove,
+       .id_table       = retu_id,
+};
+module_i2c_driver(retu_driver);
+
+MODULE_DESCRIPTION("Retu MFD driver");
+MODULE_AUTHOR("Juha Yrjölä");
+MODULE_AUTHOR("David Weinehall");
+MODULE_AUTHOR("Mikko Ylinen");
+MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
+MODULE_LICENSE("GPL");
index 56d4377..3a44efa 100644
@@ -22,6 +22,7 @@
 
 #include <linux/pci.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
index c901fa5..0dd84e9 100644
 
 static struct regmap_irq s2mps11_irqs[] = {
        [S2MPS11_IRQ_PWRONF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_PWRONF_MASK,
        },
        [S2MPS11_IRQ_PWRONR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_PWRONR_MASK,
        },
        [S2MPS11_IRQ_JIGONBF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_JIGONBF_MASK,
        },
        [S2MPS11_IRQ_JIGONBR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_JIGONBR_MASK,
        },
        [S2MPS11_IRQ_ACOKBF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_ACOKBF_MASK,
        },
        [S2MPS11_IRQ_ACOKBR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_ACOKBR_MASK,
        },
        [S2MPS11_IRQ_PWRON1S] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_PWRON1S_MASK,
        },
        [S2MPS11_IRQ_MRB] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S2MPS11_IRQ_MRB_MASK,
        },
        [S2MPS11_IRQ_RTC60S] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_RTC60S_MASK,
        },
        [S2MPS11_IRQ_RTCA1] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_RTCA1_MASK,
        },
        [S2MPS11_IRQ_RTCA2] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_RTCA2_MASK,
        },
        [S2MPS11_IRQ_SMPL] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_SMPL_MASK,
        },
        [S2MPS11_IRQ_RTC1S] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_RTC1S_MASK,
        },
        [S2MPS11_IRQ_WTSR] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S2MPS11_IRQ_WTSR_MASK,
        },
        [S2MPS11_IRQ_INT120C] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S2MPS11_IRQ_INT120C_MASK,
        },
        [S2MPS11_IRQ_INT140C] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S2MPS11_IRQ_INT140C_MASK,
        },
 };
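regmap-irq addresses each status/mask register as the configured base plus reg_offset, so offsets are zero-based; the old tables started at 1 and every lookup landed one register past the intended block. The rebased values here and in the two tables below fix that. In effect (with a register stride of 1):

	/* For an irq declared with .reg_offset = n, regmap-irq touches
	 * (status_base + n) and (mask_base + n); n starts at 0. */
	const struct regmap_irq *d = &chip->irqs[i];
	unsigned int status_reg = chip->status_base + d->reg_offset;
	unsigned int mask_reg   = chip->mask_base   + d->reg_offset;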
@@ -92,146 +92,146 @@ static struct regmap_irq s2mps11_irqs[] = {
 
 static struct regmap_irq s5m8767_irqs[] = {
        [S5M8767_IRQ_PWRR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_PWRR_MASK,
        },
        [S5M8767_IRQ_PWRF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_PWRF_MASK,
        },
        [S5M8767_IRQ_PWR1S] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_PWR1S_MASK,
        },
        [S5M8767_IRQ_JIGR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_JIGR_MASK,
        },
        [S5M8767_IRQ_JIGF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_JIGF_MASK,
        },
        [S5M8767_IRQ_LOWBAT2] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_LOWBAT2_MASK,
        },
        [S5M8767_IRQ_LOWBAT1] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8767_IRQ_LOWBAT1_MASK,
        },
        [S5M8767_IRQ_MRB] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8767_IRQ_MRB_MASK,
        },
        [S5M8767_IRQ_DVSOK2] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8767_IRQ_DVSOK2_MASK,
        },
        [S5M8767_IRQ_DVSOK3] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8767_IRQ_DVSOK3_MASK,
        },
        [S5M8767_IRQ_DVSOK4] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8767_IRQ_DVSOK4_MASK,
        },
        [S5M8767_IRQ_RTC60S] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_RTC60S_MASK,
        },
        [S5M8767_IRQ_RTCA1] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_RTCA1_MASK,
        },
        [S5M8767_IRQ_RTCA2] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_RTCA2_MASK,
        },
        [S5M8767_IRQ_SMPL] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_SMPL_MASK,
        },
        [S5M8767_IRQ_RTC1S] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_RTC1S_MASK,
        },
        [S5M8767_IRQ_WTSR] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8767_IRQ_WTSR_MASK,
        },
 };
 
 static struct regmap_irq s5m8763_irqs[] = {
        [S5M8763_IRQ_DCINF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_DCINF_MASK,
        },
        [S5M8763_IRQ_DCINR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_DCINR_MASK,
        },
        [S5M8763_IRQ_JIGF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_JIGF_MASK,
        },
        [S5M8763_IRQ_JIGR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_JIGR_MASK,
        },
        [S5M8763_IRQ_PWRONF] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_PWRONF_MASK,
        },
        [S5M8763_IRQ_PWRONR] = {
-               .reg_offset = 1,
+               .reg_offset = 0,
                .mask = S5M8763_IRQ_PWRONR_MASK,
        },
        [S5M8763_IRQ_WTSREVNT] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8763_IRQ_WTSREVNT_MASK,
        },
        [S5M8763_IRQ_SMPLEVNT] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8763_IRQ_SMPLEVNT_MASK,
        },
        [S5M8763_IRQ_ALARM1] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8763_IRQ_ALARM1_MASK,
        },
        [S5M8763_IRQ_ALARM0] = {
-               .reg_offset = 2,
+               .reg_offset = 1,
                .mask = S5M8763_IRQ_ALARM0_MASK,
        },
        [S5M8763_IRQ_ONKEY1S] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_ONKEY1S_MASK,
        },
        [S5M8763_IRQ_TOPOFFR] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_TOPOFFR_MASK,
        },
        [S5M8763_IRQ_DCINOVPR] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_DCINOVPR_MASK,
        },
        [S5M8763_IRQ_CHGRSTF] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_CHGRSTF_MASK,
        },
        [S5M8763_IRQ_DONER] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_DONER_MASK,
        },
        [S5M8763_IRQ_CHGFAULT] = {
-               .reg_offset = 3,
+               .reg_offset = 2,
                .mask = S5M8763_IRQ_CHGFAULT_MASK,
        },
        [S5M8763_IRQ_LOBAT1] = {
-               .reg_offset = 4,
+               .reg_offset = 3,
                .mask = S5M8763_IRQ_LOBAT1_MASK,
        },
        [S5M8763_IRQ_LOBAT2] = {
-               .reg_offset = 4,
+               .reg_offset = 3,
                .mask = S5M8763_IRQ_LOBAT2_MASK,
        },
 };
index d6284ca..1225dcb 100644
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
-#include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/sta2x11-mfd.h>
+#include <linux/regmap.h>
 
 #include <asm/sta2x11.h>
 
+static inline int __reg_within_range(unsigned int r,
+                                    unsigned int start,
+                                    unsigned int end)
+{
+       return ((r >= start) && (r <= end));
+}
+
 /* This describes STA2X11 MFD chip for us, we may have several */
 struct sta2x11_mfd {
        struct sta2x11_instance *instance;
-       spinlock_t lock;
+       struct regmap *regmap[sta2x11_n_mfd_plat_devs];
+       spinlock_t lock[sta2x11_n_mfd_plat_devs];
        struct list_head list;
-       void __iomem *sctl_regs;
-       void __iomem *apbreg_regs;
+       void __iomem *regs[sta2x11_n_mfd_plat_devs];
 };
 
 static LIST_HEAD(sta2x11_mfd_list);
@@ -71,6 +78,7 @@ static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
 
 static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
 {
+       int i;
        struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
        struct sta2x11_instance *instance;
 
@@ -83,7 +91,8 @@ static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
        if (!mfd)
                return -ENOMEM;
        INIT_LIST_HEAD(&mfd->list);
-       spin_lock_init(&mfd->lock);
+       for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
+               spin_lock_init(&mfd->lock[i]);
        mfd->instance = instance;
        list_add(&mfd->list, &sta2x11_mfd_list);
        return 0;
@@ -100,161 +109,276 @@ static int mfd_remove(struct pci_dev *pdev)
        return 0;
 }
 
-/* These two functions are exported and are not expected to fail */
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+/* This function is exported and is not expected to fail */
+u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
+                      enum sta2x11_mfd_plat_dev index)
 {
        struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
        u32 r;
        unsigned long flags;
+       void __iomem *regs;
 
        if (!mfd) {
                dev_warn(&pdev->dev, ": can't access sctl regs\n");
                return 0;
        }
-       if (!mfd->sctl_regs) {
+
+       regs = mfd->regs[index];
+       if (!regs) {
                dev_warn(&pdev->dev, ": system ctl not initialized\n");
                return 0;
        }
-       spin_lock_irqsave(&mfd->lock, flags);
-       r = readl(mfd->sctl_regs + reg);
+       spin_lock_irqsave(&mfd->lock[index], flags);
+       r = readl(regs + reg);
        r &= ~mask;
        r |= val;
        if (mask)
-               writel(r, mfd->sctl_regs + reg);
-       spin_unlock_irqrestore(&mfd->lock, flags);
+               writel(r, regs + reg);
+       spin_unlock_irqrestore(&mfd->lock[index], flags);
        return r;
 }
-EXPORT_SYMBOL(sta2x11_sctl_mask);
+EXPORT_SYMBOL(__sta2x11_mfd_mask);
 
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+int sta2x11_mfd_get_regs_data(struct platform_device *dev,
+                             enum sta2x11_mfd_plat_dev index,
+                             void __iomem **regs,
+                             spinlock_t **lock)
 {
-       struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
-       u32 r;
-       unsigned long flags;
+       struct pci_dev *pdev = *(struct pci_dev **)(dev->dev.platform_data);
+       struct sta2x11_mfd *mfd;
 
-       if (!mfd) {
-               dev_warn(&pdev->dev, ": can't access apb regs\n");
-               return 0;
-       }
-       if (!mfd->apbreg_regs) {
-               dev_warn(&pdev->dev, ": apb bridge not initialized\n");
-               return 0;
-       }
-       spin_lock_irqsave(&mfd->lock, flags);
-       r = readl(mfd->apbreg_regs + reg);
-       r &= ~mask;
-       r |= val;
-       if (mask)
-               writel(r, mfd->apbreg_regs + reg);
-       spin_unlock_irqrestore(&mfd->lock, flags);
-       return r;
+       if (!pdev)
+               return -ENODEV;
+       mfd = sta2x11_mfd_find(pdev);
+       if (!mfd)
+               return -ENODEV;
+       if (index >= sta2x11_n_mfd_plat_devs)
+               return -ENODEV;
+       *regs = mfd->regs[index];
+       *lock = &mfd->lock[index];
+       pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
+       return *regs ? 0 : -ENODEV;
 }
-EXPORT_SYMBOL(sta2x11_apbreg_mask);
-
-/* Two debugfs files, for our registers (FIXME: one instance only) */
-#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
-static struct debugfs_reg32 sta2x11_sctl_regs[] = {
-       REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
-       REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
-       REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
-       REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
-       REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
-       REG(SCCLKSTAT2), REG(SCRSTSTA),
-};
-#undef REG
+EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
 
-static struct debugfs_regset32 sctl_regset = {
-       .regs = sta2x11_sctl_regs,
-       .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
-};
+/*
+ * Special sta2x11-mfd regmap lock/unlock functions
+ */
+
+static void sta2x11_regmap_lock(void *__lock)
+{
+       spinlock_t *lock = __lock;
+       spin_lock(lock);
+}
 
-#define REG(regname) {.name = #regname, .offset = regname}
-static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
-       REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
-       REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+static void sta2x11_regmap_unlock(void *__lock)
+{
+       spinlock_t *lock = __lock;
+       spin_unlock(lock);
+}
+
+/* OTP (one-time programmable) registers do not require locking */
+static void sta2x11_regmap_nolock(void *__lock)
+{
+}
+
+static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
+       [sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
+       [sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
+       [sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
+       [sta2x11_scr] = STA2X11_MFD_SCR_NAME,
 };
-#undef REG
 
-static struct debugfs_regset32 apbreg_regset = {
-       .regs = sta2x11_apbreg_regs,
-       .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
+{
+       return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
+}
+
+static struct regmap_config sta2x11_sctl_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .lock = sta2x11_regmap_lock,
+       .unlock = sta2x11_regmap_unlock,
+       .max_register = SCTL_SCRSTSTA,
+       .writeable_reg = sta2x11_sctl_writeable_reg,
 };
 
-static struct dentry *sta2x11_sctl_debugfs;
-static struct dentry *sta2x11_apbreg_debugfs;
+static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
+{
+       return (reg == STA2X11_SECR_CR) ||
+               __reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
+}
 
-/* Probe for the two platform devices */
-static int sta2x11_sctl_probe(struct platform_device *dev)
+static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
 {
-       struct pci_dev **pdev;
-       struct sta2x11_mfd *mfd;
-       struct resource *res;
+       return false;
+}
 
-       pdev = dev->dev.platform_data;
-       mfd = sta2x11_mfd_find(*pdev);
-       if (!mfd)
-               return -ENODEV;
+static struct regmap_config sta2x11_scr_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .lock = sta2x11_regmap_nolock,
+       .unlock = sta2x11_regmap_nolock,
+       .max_register = STA2X11_SECR_FVR1,
+       .readable_reg = sta2x11_scr_readable_reg,
+       .writeable_reg = sta2x11_scr_writeable_reg,
+};
 
-       res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENOMEM;
+static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
+{
+       /* Two blocks (CAN and MLB, SARAC) 0x100 bytes apart */
+       if (reg >= APBREG_BSR_SARAC)
+               reg -= APBREG_BSR_SARAC;
+       switch (reg) {
+       case APBREG_BSR:
+       case APBREG_PAER:
+       case APBREG_PWAC:
+       case APBREG_PRAC:
+       case APBREG_PCG:
+       case APBREG_PUR:
+       case APBREG_EMU_PCG:
+               return true;
+       default:
+               return false;
+       }
+}
 
-       if (!request_mem_region(res->start, resource_size(res),
-                               "sta2x11-sctl"))
-               return -EBUSY;
+static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
+{
+       if (reg >= APBREG_BSR_SARAC)
+               reg -= APBREG_BSR_SARAC;
+       if (!sta2x11_apbreg_readable_reg(dev, reg))
+               return false;
+       return reg != APBREG_PAER;
+}
 
-       mfd->sctl_regs = ioremap(res->start, resource_size(res));
-       if (!mfd->sctl_regs) {
-               release_mem_region(res->start, resource_size(res));
-               return -ENOMEM;
+static struct regmap_config sta2x11_apbreg_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .lock = sta2x11_regmap_lock,
+       .unlock = sta2x11_regmap_unlock,
+       .max_register = APBREG_EMU_PCG_SARAC,
+       .readable_reg = sta2x11_apbreg_readable_reg,
+       .writeable_reg = sta2x11_apbreg_writeable_reg,
+};
+
+static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
+                                             unsigned int reg)
+{
+       return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
+               __reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
+               __reg_within_range(reg, MASTER_LOCK_REG,
+                                  SYSTEM_CONFIG_STATUS_REG) ||
+               reg == MSP_CLK_CTRL_REG ||
+               __reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
+}
+
+static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
+                                              unsigned int reg)
+{
+       if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
+               return false;
+       switch (reg) {
+       case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
+       case SYSTEM_CONFIG_STATUS_REG:
+       case COMPENSATION_REG1:
+       case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
+       case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
+               return false;
+       default:
+               return true;
        }
-       sctl_regset.base = mfd->sctl_regs;
-       sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
-                                                 S_IFREG | S_IRUGO,
-                                                 NULL, &sctl_regset);
-       return 0;
 }
 
-static int sta2x11_apbreg_probe(struct platform_device *dev)
+static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .lock = sta2x11_regmap_lock,
+       .unlock = sta2x11_regmap_unlock,
+       .max_register = TEST_CTL_REG,
+       .readable_reg = sta2x11_apb_soc_regs_readable_reg,
+       .writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
+};
+
+static struct regmap_config *
+sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
+       [sta2x11_sctl] = &sta2x11_sctl_regmap_config,
+       [sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
+       [sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
+       [sta2x11_scr] = &sta2x11_scr_regmap_config,
+};
+
+/* Probe for the four platform devices */
+
+static int sta2x11_mfd_platform_probe(struct platform_device *dev,
+                                     enum sta2x11_mfd_plat_dev index)
 {
        struct pci_dev **pdev;
        struct sta2x11_mfd *mfd;
        struct resource *res;
+       const char *name = sta2x11_mfd_names[index];
+       struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];
 
        pdev = dev->dev.platform_data;
-       dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
-       dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
-
        mfd = sta2x11_mfd_find(*pdev);
        if (!mfd)
                return -ENODEV;
+       if (!regmap_config)
+               return -ENODEV;
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENOMEM;
 
-       if (!request_mem_region(res->start, resource_size(res),
-                               "sta2x11-apbreg"))
+       if (!request_mem_region(res->start, resource_size(res), name))
                return -EBUSY;
 
-       mfd->apbreg_regs = ioremap(res->start, resource_size(res));
-       if (!mfd->apbreg_regs) {
+       mfd->regs[index] = ioremap(res->start, resource_size(res));
+       if (!mfd->regs[index]) {
                release_mem_region(res->start, resource_size(res));
                return -ENOMEM;
        }
-       dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+       regmap_config->lock_arg = &mfd->lock[index];
+       /*
+        * No caching, registers could be reached both via regmap and via
+        * void __iomem *
+        */
+       regmap_config->cache_type = REGCACHE_NONE;
+       mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
+                                                  regmap_config);
+       WARN_ON(IS_ERR(mfd->regmap[index]));
 
-       apbreg_regset.base = mfd->apbreg_regs;
-       sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
-                                                 S_IFREG | S_IRUGO,
-                                                 NULL, &apbreg_regset);
        return 0;
 }
 
-/* The two platform drivers */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+       return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+       return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
+}
+
+static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
+{
+       return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
+}
+
+static int sta2x11_scr_probe(struct platform_device *dev)
+{
+       return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
+}
+
+/* The four platform drivers */
 static struct platform_driver sta2x11_sctl_platform_driver = {
        .driver = {
-               .name   = "sta2x11-sctl",
+               .name   = STA2X11_MFD_SCTL_NAME,
                .owner  = THIS_MODULE,
        },
        .probe          = sta2x11_sctl_probe,
@@ -268,7 +392,7 @@ static int __init sta2x11_sctl_init(void)
 
 static struct platform_driver sta2x11_platform_driver = {
        .driver = {
-               .name   = "sta2x11-apbreg",
+               .name   = STA2X11_MFD_APBREG_NAME,
                .owner  = THIS_MODULE,
        },
        .probe          = sta2x11_apbreg_probe,
@@ -280,13 +404,44 @@ static int __init sta2x11_apbreg_init(void)
        return platform_driver_register(&sta2x11_platform_driver);
 }
 
+static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
+       .driver = {
+               .name   = STA2X11_MFD_APB_SOC_REGS_NAME,
+               .owner  = THIS_MODULE,
+       },
+       .probe          = sta2x11_apb_soc_regs_probe,
+};
+
+static int __init sta2x11_apb_soc_regs_init(void)
+{
+       pr_info("%s\n", __func__);
+       return platform_driver_register(&sta2x11_apb_soc_regs_platform_driver);
+}
+
+static struct platform_driver sta2x11_scr_platform_driver = {
+       .driver = {
+               .name = STA2X11_MFD_SCR_NAME,
+               .owner = THIS_MODULE,
+       },
+       .probe = sta2x11_scr_probe,
+};
+
+static int __init sta2x11_scr_init(void)
+{
+       pr_info("%s\n", __func__);
+       return platform_driver_register(&sta2x11_scr_platform_driver);
+}
+
 /*
- * What follows is the PCI device that hosts the above two pdevs.
+ * What follows are the PCI devices that host the above pdevs.
  * Each logic block is 4kB and they are all consecutive: we use this info.
  */
 
-/* Bar 0 */
-enum bar0_cells {
+/* Mfd 0 devices */
+
+/* Mfd 0, Bar 0 */
+enum mfd0_bar0_cells {
        STA2X11_GPIO_0 = 0,
        STA2X11_GPIO_1,
        STA2X11_GPIO_2,
@@ -295,8 +450,8 @@ enum bar0_cells {
        STA2X11_SCR,
        STA2X11_TIME,
 };
-/* Bar 1 */
-enum bar1_cells {
+/* Mfd 0, Bar 1 */
+enum mfd0_bar1_cells {
        STA2X11_APBREG = 0,
 };
 #define CELL_4K(_name, _cell) { \
@@ -307,40 +462,71 @@ enum bar1_cells {
 
 static const struct resource gpio_resources[] = {
        {
-               .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+               /* 4 consecutive cells, 1 driver */
+               .name = STA2X11_MFD_GPIO_NAME,
                .start = 0,
                .end = (4 * 4096) - 1,
                .flags = IORESOURCE_MEM,
        }
 };
 static const struct resource sctl_resources[] = {
-       CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+       CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
 };
 static const struct resource scr_resources[] = {
-       CELL_4K("sta2x11-scr", STA2X11_SCR),
+       CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
 };
 static const struct resource time_resources[] = {
-       CELL_4K("sta2x11-time", STA2X11_TIME),
+       CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
 };
 
 static const struct resource apbreg_resources[] = {
-       CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+       CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
 };
 
 #define DEV(_name, _r) \
        { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
 
-static struct mfd_cell sta2x11_mfd_bar0[] = {
-       DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
-       DEV("sta2x11-sctl", sctl_resources),
-       DEV("sta2x11-scr", scr_resources),
-       DEV("sta2x11-time", time_resources),
+static struct mfd_cell sta2x11_mfd0_bar0[] = {
+       /* offset 0: we add pdata later */
+       DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
+       DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
+       DEV(STA2X11_MFD_SCR_NAME,  scr_resources),
+       DEV(STA2X11_MFD_TIME_NAME, time_resources),
 };
 
-static struct mfd_cell sta2x11_mfd_bar1[] = {
-       DEV("sta2x11-apbreg", apbreg_resources),
+static struct mfd_cell sta2x11_mfd0_bar1[] = {
+       DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
 };
 
+/* Mfd 1 devices */
+
+/* Mfd 1, Bar 0 */
+enum mfd1_bar0_cells {
+       STA2X11_VIC = 0,
+};
+
+/* Mfd 1, Bar 1 */
+enum mfd1_bar1_cells {
+       STA2X11_APB_SOC_REGS = 0,
+};
+
+static const __devinitconst struct resource vic_resources[] = {
+       CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
+};
+
+static const __devinitconst struct resource apb_soc_regs_resources[] = {
+       CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd1_bar0[] = {
+       DEV(STA2X11_MFD_VIC_NAME, vic_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd1_bar1[] = {
+       DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
+};
+
+
 static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        pci_save_state(pdev);
@@ -363,11 +549,63 @@ static int sta2x11_mfd_resume(struct pci_dev *pdev)
        return 0;
 }
 
+struct sta2x11_mfd_bar_setup_data {
+       struct mfd_cell *cells;
+       int ncells;
+};
+
+struct sta2x11_mfd_setup_data {
+       struct sta2x11_mfd_bar_setup_data bars[2];
+};
+
+#define STA2X11_MFD0 0
+#define STA2X11_MFD1 1
+
+static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
+       /* Mfd 0: gpio, sctl, scr, timers / apbregs */
+       [STA2X11_MFD0] = {
+               .bars = {
+                       [0] = {
+                               .cells = sta2x11_mfd0_bar0,
+                               .ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
+                       },
+                       [1] = {
+                               .cells = sta2x11_mfd0_bar1,
+                               .ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
+                       },
+               },
+       },
+       /* Mfd 1: vic / apb-soc-regs */
+       [STA2X11_MFD1] = {
+               .bars = {
+                       [0] = {
+                               .cells = sta2x11_mfd1_bar0,
+                               .ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
+                       },
+                       [1] = {
+                               .cells = sta2x11_mfd1_bar1,
+                               .ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
+                       },
+               },
+       },
+};
+
+static void sta2x11_mfd_setup(struct pci_dev *pdev,
+                             struct sta2x11_mfd_setup_data *sd)
+{
+       int i, j;
+       for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
+               for (j = 0; j < sd->bars[i].ncells; j++) {
+                       sd->bars[i].cells[j].pdata_size = sizeof(pdev);
+                       sd->bars[i].cells[j].platform_data = &pdev;
+               }
+}
+
 static int sta2x11_mfd_probe(struct pci_dev *pdev,
-                                      const struct pci_device_id *pci_id)
+                            const struct pci_device_id *pci_id)
 {
        int err, i;
-       struct sta2x11_gpio_pdata *gpio_data;
+       struct sta2x11_mfd_setup_data *setup_data;
 
        dev_info(&pdev->dev, "%s\n", __func__);
 
@@ -381,46 +619,29 @@ static int sta2x11_mfd_probe(struct pci_dev *pdev,
        if (err)
                dev_info(&pdev->dev, "Enable msi failed\n");
 
-       /* Read gpio config data as pci device's platform data */
-       gpio_data = dev_get_platdata(&pdev->dev);
-       if (!gpio_data)
-               dev_warn(&pdev->dev, "no gpio configuration\n");
-
-       dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
-               gpio_data, &gpio_data);
-       dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
-               pdev, &pdev);
+       setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
+               &mfd_setup_data[STA2X11_MFD0] :
+               &mfd_setup_data[STA2X11_MFD1];
 
        /* platform data is the pci device for all of them */
-       for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
-               sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
-               sta2x11_mfd_bar0[i].platform_data = &pdev;
-       }
-       sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
-       sta2x11_mfd_bar1[0].platform_data = &pdev;
+       sta2x11_mfd_setup(pdev, setup_data);
 
        /* Record this pdev before mfd_add_devices: their probe looks for it */
-       sta2x11_mfd_add(pdev, GFP_ATOMIC);
-
-
-       err = mfd_add_devices(&pdev->dev, -1,
-                             sta2x11_mfd_bar0,
-                             ARRAY_SIZE(sta2x11_mfd_bar0),
-                             &pdev->resource[0],
-                             0, NULL);
-       if (err) {
-               dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
-               goto err_disable;
-       }
-
-       err = mfd_add_devices(&pdev->dev, -1,
-                             sta2x11_mfd_bar1,
-                             ARRAY_SIZE(sta2x11_mfd_bar1),
-                             &pdev->resource[1],
-                             0, NULL);
-       if (err) {
-               dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
-               goto err_disable;
+       if (!sta2x11_mfd_find(pdev))
+               sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+       /* Just 2 bars for all MFDs at present */
+       for (i = 0; i < 2; i++) {
+               err = mfd_add_devices(&pdev->dev, -1,
+                                     setup_data->bars[i].cells,
+                                     setup_data->bars[i].ncells,
+                                     &pdev->resource[i],
+                                     0, NULL);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "mfd_add_devices[%d] failed: %d\n", i, err);
+                       goto err_disable;
+               }
        }
 
        return 0;
@@ -434,6 +655,7 @@ err_disable:
 
 static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+       {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
        {0,},
 };
 
@@ -459,6 +681,8 @@ static int __init sta2x11_mfd_init(void)
  */
 subsys_initcall(sta2x11_apbreg_init);
 subsys_initcall(sta2x11_sctl_init);
+subsys_initcall(sta2x11_apb_soc_regs_init);
+subsys_initcall(sta2x11_scr_init);
 rootfs_initcall(sta2x11_mfd_init);
 
 MODULE_LICENSE("GPL v2");
index 36df187..fd5fcb6 100644
@@ -82,11 +82,13 @@ static const struct i2c_device_id stmpe_i2c_id[] = {
 MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
 
 static struct i2c_driver stmpe_i2c_driver = {
-       .driver.name    = "stmpe-i2c",
-       .driver.owner   = THIS_MODULE,
+       .driver = {
+               .name = "stmpe-i2c",
+               .owner = THIS_MODULE,
 #ifdef CONFIG_PM
-       .driver.pm      = &stmpe_dev_pm_ops,
+               .pm = &stmpe_dev_pm_ops,
 #endif
+       },
        .probe          = stmpe_i2c_probe,
        .remove         = stmpe_i2c_remove,
        .id_table       = stmpe_i2c_id,
index 79e88d1..1963619 100644
@@ -7,11 +7,15 @@
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
  */
 
+#include <linux/err.h>
 #include <linux/gpio.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/mfd/core.h>
@@ -312,14 +316,10 @@ static struct mfd_cell stmpe_gpio_cell_noirq = {
 static struct resource stmpe_keypad_resources[] = {
        {
                .name   = "KEYPAD",
-               .start  = 0,
-               .end    = 0,
                .flags  = IORESOURCE_IRQ,
        },
        {
                .name   = "KEYPAD_OVER",
-               .start  = 1,
-               .end    = 1,
                .flags  = IORESOURCE_IRQ,
        },
 };
@@ -399,14 +399,10 @@ static struct stmpe_variant_info stmpe801_noirq = {
 static struct resource stmpe_ts_resources[] = {
        {
                .name   = "TOUCH_DET",
-               .start  = 0,
-               .end    = 0,
                .flags  = IORESOURCE_IRQ,
        },
        {
                .name   = "FIFO_TH",
-               .start  = 1,
-               .end    = 1,
                .flags  = IORESOURCE_IRQ,
        },
 };
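The fixed .start/.end values (0 and 1) are dropped because the same cells are now shared between variants whose IRQ numbering differs; stmpe_devices_init() stamps each IRQ resource at probe time instead, as the loop at the end of this section shows. Condensed:

	/* The j-th IRQ resource of a block gets the variant's IRQ plus j,
	 * e.g. KEYPAD at block->irq and KEYPAD_OVER at block->irq + 1. */
	if (res->flags & IORESOURCE_IRQ)
		res->start = res->end = block->irq + j;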
@@ -528,12 +524,12 @@ static const u8 stmpe1601_regs[] = {
 static struct stmpe_variant_block stmpe1601_blocks[] = {
        {
                .cell   = &stmpe_gpio_cell,
-               .irq    = STMPE24XX_IRQ_GPIOC,
+               .irq    = STMPE1601_IRQ_GPIOC,
                .block  = STMPE_BLOCK_GPIO,
        },
        {
                .cell   = &stmpe_keypad_cell,
-               .irq    = STMPE24XX_IRQ_KEYPAD,
+               .irq    = STMPE1601_IRQ_KEYPAD,
                .block  = STMPE_BLOCK_KEYPAD,
        },
 };
@@ -767,7 +763,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
        int i;
 
        if (variant->id_val == STMPE801_ID) {
-               handle_nested_irq(stmpe->irq_base);
+               int base = irq_create_mapping(stmpe->domain, 0);
+
+               handle_nested_irq(base);
                return IRQ_HANDLED;
        }
 
@@ -788,8 +786,9 @@ static irqreturn_t stmpe_irq(int irq, void *data)
                while (status) {
                        int bit = __ffs(status);
                        int line = bank * 8 + bit;
+                       int nestedirq = irq_create_mapping(stmpe->domain, line);
 
-                       handle_nested_irq(stmpe->irq_base + line);
+                       handle_nested_irq(nestedirq);
                        status &= ~(1 << bit);
                }
 
@@ -830,7 +829,7 @@ static void stmpe_irq_sync_unlock(struct irq_data *data)
 static void stmpe_irq_mask(struct irq_data *data)
 {
        struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
-       int offset = data->irq - stmpe->irq_base;
+       int offset = data->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
@@ -840,7 +839,7 @@ static void stmpe_irq_mask(struct irq_data *data)
 static void stmpe_irq_unmask(struct irq_data *data)
 {
        struct stmpe *stmpe = irq_data_get_irq_chip_data(data);
-       int offset = data->irq - stmpe->irq_base;
+       int offset = data->hwirq;
        int regoffset = offset / 8;
        int mask = 1 << (offset % 8);
 
@@ -855,43 +854,59 @@ static struct irq_chip stmpe_irq_chip = {
        .irq_unmask             = stmpe_irq_unmask,
 };
 
-static int __devinit stmpe_irq_init(struct stmpe *stmpe)
+static int stmpe_irq_map(struct irq_domain *d, unsigned int virq,
+                                irq_hw_number_t hwirq)
 {
+       struct stmpe *stmpe = d->host_data;
        struct irq_chip *chip = NULL;
-       int num_irqs = stmpe->variant->num_irqs;
-       int base = stmpe->irq_base;
-       int irq;
 
        if (stmpe->variant->id_val != STMPE801_ID)
                chip = &stmpe_irq_chip;
 
-       for (irq = base; irq < base + num_irqs; irq++) {
-               irq_set_chip_data(irq, stmpe);
-               irq_set_chip_and_handler(irq, chip, handle_edge_irq);
-               irq_set_nested_thread(irq, 1);
+       irq_set_chip_data(virq, stmpe);
+       irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+       irq_set_nested_thread(virq, 1);
 #ifdef CONFIG_ARM
-               set_irq_flags(irq, IRQF_VALID);
+       set_irq_flags(virq, IRQF_VALID);
 #else
-               irq_set_noprobe(irq);
+       irq_set_noprobe(virq);
 #endif
-       }
 
        return 0;
 }
 
-static void stmpe_irq_remove(struct stmpe *stmpe)
+static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-       int num_irqs = stmpe->variant->num_irqs;
-       int base = stmpe->irq_base;
-       int irq;
-
-       for (irq = base; irq < base + num_irqs; irq++) {
 #ifdef CONFIG_ARM
-               set_irq_flags(irq, 0);
+               set_irq_flags(virq, 0);
 #endif
-               irq_set_chip_and_handler(irq, NULL, NULL);
-               irq_set_chip_data(irq, NULL);
+               irq_set_chip_and_handler(virq, NULL, NULL);
+               irq_set_chip_data(virq, NULL);
+}
+
+static struct irq_domain_ops stmpe_irq_ops = {
+        .map    = stmpe_irq_map,
+        .unmap  = stmpe_irq_unmap,
+        .xlate  = irq_domain_xlate_twocell,
+};
+
+static int __devinit stmpe_irq_init(struct stmpe *stmpe,
+                               struct device_node *np)
+{
+       int base = 0;
+       int num_irqs = stmpe->variant->num_irqs;
+
+       if (!np)
+               base = stmpe->irq_base;
+
+       stmpe->domain = irq_domain_add_simple(np, num_irqs, base,
+                                             &stmpe_irq_ops, stmpe);
+       if (!stmpe->domain) {
+               dev_err(stmpe->dev, "Failed to create irqdomain\n");
+               return -ENOSYS;
        }
+
+       return 0;
 }
 
 static int __devinit stmpe_chip_init(struct stmpe *stmpe)
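
Note: irq_domain_add_simple() picks the right domain type for both boot methods. A sketch of the pattern, under the same illustrative assumptions as above:

#include <linux/irqdomain.h>
#include <linux/of.h>

/* With a device_node the domain is linear and virqs are allocated on
 * demand; without one, a nonzero base preserves the static numbering
 * that legacy board files compiled against. */
static int example_irq_init(struct example_chip *chip, struct device_node *np)
{
	int base = np ? 0 : chip->irq_base;	/* assumed field */

	chip->domain = irq_domain_add_simple(np, chip->num_irqs, base,
					     &example_irq_ops, chip);
	if (!chip->domain)
		return -ENOMEM;	/* the patch above returns -ENOSYS here */

	return 0;
}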
@@ -942,13 +957,6 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
                        else
                                icr |= STMPE_ICR_LSB_HIGH;
                }
-
-               if (stmpe->pdata->irq_invert_polarity) {
-                       if (id == STMPE801_ID)
-                               icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
-                       else
-                               icr ^= STMPE_ICR_LSB_HIGH;
-               }
        }
 
        if (stmpe->pdata->autosleep) {
@@ -961,10 +969,10 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
 }
 
 static int __devinit stmpe_add_device(struct stmpe *stmpe,
-                                     struct mfd_cell *cell, int irq)
+                                     struct mfd_cell *cell)
 {
        return mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1,
-                              NULL, stmpe->irq_base + irq, NULL);
+                              NULL, stmpe->irq_base, stmpe->domain);
 }
 
 static int __devinit stmpe_devices_init(struct stmpe *stmpe)
@@ -972,7 +980,7 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
        struct stmpe_variant_info *variant = stmpe->variant;
        unsigned int platform_blocks = stmpe->pdata->blocks;
        int ret = -EINVAL;
-       int i;
+       int i, j;
 
        for (i = 0; i < variant->num_blocks; i++) {
                struct stmpe_variant_block *block = &variant->blocks[i];
@@ -980,8 +988,17 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
                if (!(platform_blocks & block->block))
                        continue;
 
+               for (j = 0; j < block->cell->num_resources; j++) {
+                       struct resource *res =
+                               (struct resource *) &block->cell->resources[j];
+
+                       /* Dynamically fill in a variant's IRQ. */
+                       if (res->flags & IORESOURCE_IRQ)
+                               res->start = res->end = block->irq + j;
+               }
+
                platform_blocks &= ~block->block;
-               ret = stmpe_add_device(stmpe, block->cell, block->irq);
+               ret = stmpe_add_device(stmpe, block->cell);
                if (ret)
                        return ret;
        }
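
Note: the per-cell irq argument disappears because mfd_add_devices() now receives the irq_domain; the MFD core treats each IORESOURCE_IRQ as a hwirq and remaps it for the child. A sketch of the mechanism (example_* names are illustrative):

#include <linux/ioport.h>
#include <linux/mfd/core.h>

static int example_add_cell(struct example_chip *chip,
			    struct mfd_cell *cell, int hwirq)
{
	/* The cast mirrors the hunk above: mfd_cell resources are
	 * declared const but filled in at probe time. */
	struct resource *res = (struct resource *)&cell->resources[0];

	if (res->flags & IORESOURCE_IRQ)
		res->start = res->end = hwirq;

	/* Passing chip->domain makes the MFD core map hwirq to a virq
	 * before the child driver ever sees the resource. */
	return mfd_add_devices(chip->dev, 0, cell, 1, NULL, 0, chip->domain);
}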
@@ -994,17 +1011,56 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
        return ret;
 }
 
+void __devinit stmpe_of_probe(struct stmpe_platform_data *pdata,
+                       struct device_node *np)
+{
+       struct device_node *child;
+
+       pdata->id = -1;
+       pdata->irq_trigger = IRQF_TRIGGER_NONE;
+
+       of_property_read_u32(np, "st,autosleep-timeout",
+                       &pdata->autosleep_timeout);
+
+       pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
+
+       for_each_child_of_node(np, child) {
+               if (!strcmp(child->name, "stmpe_gpio")) {
+                       pdata->blocks |= STMPE_BLOCK_GPIO;
+               } else if (!strcmp(child->name, "stmpe_keypad")) {
+                       pdata->blocks |= STMPE_BLOCK_KEYPAD;
+               } else if (!strcmp(child->name, "stmpe_touchscreen")) {
+                       pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
+               } else if (!strcmp(child->name, "stmpe_adc")) {
+                       pdata->blocks |= STMPE_BLOCK_ADC;
+               } else if (!strcmp(child->name, "stmpe_pwm")) {
+                       pdata->blocks |= STMPE_BLOCK_PWM;
+               } else if (!strcmp(child->name, "stmpe_rotator")) {
+                       pdata->blocks |= STMPE_BLOCK_ROTATOR;
+               }
+       }
+}
+
 /* Called from client specific probe routines */
 int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
 {
        struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
+       struct device_node *np = ci->dev->of_node;
        struct stmpe *stmpe;
        int ret;
 
-       if (!pdata)
-               return -EINVAL;
+       if (!pdata) {
+               if (!np)
+                       return -EINVAL;
+
+               pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL);
+               if (!pdata)
+                       return -ENOMEM;
+
+               stmpe_of_probe(pdata, np);
+       }
 
-       stmpe = kzalloc(sizeof(struct stmpe), GFP_KERNEL);
+       stmpe = devm_kzalloc(ci->dev, sizeof(struct stmpe), GFP_KERNEL);
        if (!stmpe)
                return -ENOMEM;
 
@@ -1026,11 +1082,12 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
                ci->init(stmpe);
 
        if (pdata->irq_over_gpio) {
-               ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+               ret = devm_gpio_request_one(ci->dev, pdata->irq_gpio,
+                               GPIOF_DIR_IN, "stmpe");
                if (ret) {
                        dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
                                        ret);
-                       goto out_free;
+                       return ret;
                }
 
                stmpe->irq = gpio_to_irq(pdata->irq_gpio);
@@ -1047,51 +1104,40 @@ int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
                        dev_err(stmpe->dev,
                                "%s does not support no-irq mode!\n",
                                stmpe->variant->name);
-                       ret = -ENODEV;
-                       goto free_gpio;
+                       return -ENODEV;
                }
                stmpe->variant = stmpe_noirq_variant_info[stmpe->partnum];
+       } else if (pdata->irq_trigger == IRQF_TRIGGER_NONE) {
+               pdata->irq_trigger =
+                       irqd_get_trigger_type(irq_get_irq_data(stmpe->irq));
        }
 
        ret = stmpe_chip_init(stmpe);
        if (ret)
-               goto free_gpio;
+               return ret;
 
        if (stmpe->irq >= 0) {
-               ret = stmpe_irq_init(stmpe);
+               ret = stmpe_irq_init(stmpe, np);
                if (ret)
-                       goto free_gpio;
+                       return ret;
 
-               ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
-                               pdata->irq_trigger | IRQF_ONESHOT,
+               ret = devm_request_threaded_irq(ci->dev, stmpe->irq, NULL,
+                               stmpe_irq, pdata->irq_trigger | IRQF_ONESHOT,
                                "stmpe", stmpe);
                if (ret) {
                        dev_err(stmpe->dev, "failed to request IRQ: %d\n",
                                        ret);
-                       goto out_removeirq;
+                       return ret;
                }
        }
 
        ret = stmpe_devices_init(stmpe);
-       if (ret) {
-               dev_err(stmpe->dev, "failed to add children\n");
-               goto out_removedevs;
-       }
-
-       return 0;
+       if (!ret)
+               return 0;
 
-out_removedevs:
+       dev_err(stmpe->dev, "failed to add children\n");
        mfd_remove_devices(stmpe->dev);
-       if (stmpe->irq >= 0)
-               free_irq(stmpe->irq, stmpe);
-out_removeirq:
-       if (stmpe->irq >= 0)
-               stmpe_irq_remove(stmpe);
-free_gpio:
-       if (pdata->irq_over_gpio)
-               gpio_free(pdata->irq_gpio);
-out_free:
-       kfree(stmpe);
+
        return ret;
 }
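
Note: the probe unwind labels vanish because every resource is now device-managed. A minimal sketch of the devm pattern this conversion relies on (example_* names are illustrative):

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static int example_probe(struct device *dev, struct example_pdata *pdata)
{
	struct example_chip *chip;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	ret = devm_gpio_request_one(dev, pdata->irq_gpio, GPIOF_DIR_IN,
				    "example");
	if (ret)
		return ret;	/* GPIO is released automatically */

	/* The IRQ is freed on driver detach as well, so remove() no
	 * longer needs free_irq()/gpio_free()/kfree() calls. */
	return devm_request_threaded_irq(dev, gpio_to_irq(pdata->irq_gpio),
					 NULL, example_irq_thread, IRQF_ONESHOT,
					 "example", chip);
}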
 
@@ -1099,16 +1145,6 @@ int stmpe_remove(struct stmpe *stmpe)
 {
        mfd_remove_devices(stmpe->dev);
 
-       if (stmpe->irq >= 0) {
-               free_irq(stmpe->irq, stmpe);
-               stmpe_irq_remove(stmpe);
-       }
-
-       if (stmpe->pdata->irq_over_gpio)
-               gpio_free(stmpe->pdata->irq_gpio);
-
-       kfree(stmpe);
-
        return 0;
 }
 
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
new file mode 100644 (file)
index 0000000..8ca3bf0
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/mfd/ti_am335x_tscadc.h>
+#include <linux/input/ti_am335x_tsc.h>
+#include <linux/platform_data/ti_am335x_adc.h>
+
+static unsigned int tscadc_readl(struct ti_tscadc_dev *tsadc, unsigned int reg)
+{
+       unsigned int val;
+
+       regmap_read(tsadc->regmap_tscadc, reg, &val);
+       return val;
+}
+
+static void tscadc_writel(struct ti_tscadc_dev *tsadc, unsigned int reg,
+                                       unsigned int val)
+{
+       regmap_write(tsadc->regmap_tscadc, reg, val);
+}
+
+static const struct regmap_config tscadc_regmap_config = {
+       .name = "ti_tscadc",
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+};
+
+static void tscadc_idle_config(struct ti_tscadc_dev *config)
+{
+       unsigned int idleconfig;
+
+       idleconfig = STEPCONFIG_YNN | STEPCONFIG_INM_ADCREFM |
+                       STEPCONFIG_INP_ADCREFM | STEPCONFIG_YPN;
+
+       tscadc_writel(config, REG_IDLECONFIG, idleconfig);
+}
+
+static int __devinit ti_tscadc_probe(struct platform_device *pdev)
+{
+       struct ti_tscadc_dev    *tscadc;
+       struct resource         *res;
+       struct clk              *clk;
+       struct mfd_tscadc_board *pdata = pdev->dev.platform_data;
+       struct mfd_cell         *cell;
+       int                     err, ctrl;
+       int                     clk_value, clock_rate;
+       int                     tsc_wires, adc_channels = 0, total_channels;
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "Could not find platform data\n");
+               return -EINVAL;
+       }
+
+       if (pdata->adc_init)
+               adc_channels = pdata->adc_init->adc_channels;
+
+       tsc_wires = pdata->tsc_init->wires;
+       total_channels = tsc_wires + adc_channels;
+
+       if (total_channels > 8) {
+               dev_err(&pdev->dev, "Number of i/p channels more than 8\n");
+               return -EINVAL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "no memory resource defined.\n");
+               return -EINVAL;
+       }
+
+       /* Allocate memory for device */
+       tscadc = devm_kzalloc(&pdev->dev,
+                       sizeof(struct ti_tscadc_dev), GFP_KERNEL);
+       if (!tscadc) {
+               dev_err(&pdev->dev, "failed to allocate memory.\n");
+               return -ENOMEM;
+       }
+       tscadc->dev = &pdev->dev;
+
+       err = platform_get_irq(pdev, 0);
+       if (err < 0) {
+               dev_err(&pdev->dev, "no irq ID is specified.\n");
+               goto ret;
+       } else
+               tscadc->irq = err;
+
+       res = devm_request_mem_region(&pdev->dev,
+                       res->start, resource_size(res), pdev->name);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to reserve registers.\n");
+               return -EBUSY;
+       }
+
+       tscadc->tscadc_base = devm_ioremap(&pdev->dev,
+                       res->start, resource_size(res));
+       if (!tscadc->tscadc_base) {
+               dev_err(&pdev->dev, "failed to map registers.\n");
+               return -ENOMEM;
+       }
+
+       tscadc->regmap_tscadc = devm_regmap_init_mmio(&pdev->dev,
+                       tscadc->tscadc_base, &tscadc_regmap_config);
+       if (IS_ERR(tscadc->regmap_tscadc)) {
+               dev_err(&pdev->dev, "regmap init failed\n");
+               err = PTR_ERR(tscadc->regmap_tscadc);
+               goto ret;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
+       /*
+        * The TSC_ADC_Subsystem has 2 clock domains:
+        * OCP_CLK and ADC_CLK.
+        * The ADC clock is expected to run at a target of 3 MHz
+        * and to capture 12-bit data at a rate of 200 KSPS.
+        * The TSC_ADC_SS controller design assumes the OCP clock is
+        * at least 6x faster than the ADC clock.
+        */
+       clk = clk_get(&pdev->dev, "adc_tsc_fck");
+       if (IS_ERR(clk)) {
+               dev_err(&pdev->dev, "failed to get TSC fck\n");
+               err = PTR_ERR(clk);
+               goto err_disable_clk;
+       }
+       clock_rate = clk_get_rate(clk);
+       clk_put(clk);
+       clk_value = clock_rate / ADC_CLK;
+       if (clk_value < MAX_CLK_DIV) {
+               dev_err(&pdev->dev, "clock input less than min clock requirement\n");
+               err = -EINVAL;
+               goto err_disable_clk;
+       }
+       /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
+       clk_value = clk_value - 1;
+       tscadc_writel(tscadc, REG_CLKDIV, clk_value);
+
+       /* Set the control register bits */
+       ctrl = CNTRLREG_STEPCONFIGWRT |
+                       CNTRLREG_TSCENB |
+                       CNTRLREG_STEPID |
+                       CNTRLREG_4WIRE;
+       tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+       /* Set register bits for Idle Config Mode */
+       tscadc_idle_config(tscadc);
+
+       /* Set the TSC module enable bit */
+       ctrl = tscadc_readl(tscadc, REG_CTRL);
+       ctrl |= CNTRLREG_TSCSSENB;
+       tscadc_writel(tscadc, REG_CTRL, ctrl);
+
+       /* TSC Cell */
+       cell = &tscadc->cells[TSC_CELL];
+       cell->name = "tsc";
+       cell->platform_data = tscadc;
+       cell->pdata_size = sizeof(*tscadc);
+
+       /* ADC Cell */
+       cell = &tscadc->cells[ADC_CELL];
+       cell->name = "tiadc";
+       cell->platform_data = tscadc;
+       cell->pdata_size = sizeof(*tscadc);
+
+       err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
+                       TSCADC_CELLS, NULL, 0, NULL);
+       if (err < 0)
+               goto err_disable_clk;
+
+       device_init_wakeup(&pdev->dev, true);
+       platform_set_drvdata(pdev, tscadc);
+
+       return 0;
+
+err_disable_clk:
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+ret:
+       return err;
+}
+
+static int __devexit ti_tscadc_remove(struct platform_device *pdev)
+{
+       struct ti_tscadc_dev    *tscadc = platform_get_drvdata(pdev);
+
+       tscadc_writel(tscadc, REG_SE, 0x00);
+
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       mfd_remove_devices(tscadc->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tscadc_suspend(struct device *dev)
+{
+       struct ti_tscadc_dev    *tscadc_dev = dev_get_drvdata(dev);
+
+       tscadc_writel(tscadc_dev, REG_SE, 0x00);
+       pm_runtime_put_sync(dev);
+
+       return 0;
+}
+
+static int tscadc_resume(struct device *dev)
+{
+       struct ti_tscadc_dev    *tscadc_dev = dev_get_drvdata(dev);
+       unsigned int restore, ctrl;
+
+       pm_runtime_get_sync(dev);
+
+       /* context restore */
+       ctrl = CNTRLREG_STEPCONFIGWRT | CNTRLREG_TSCENB |
+                       CNTRLREG_STEPID | CNTRLREG_4WIRE;
+       tscadc_writel(tscadc_dev, REG_CTRL, ctrl);
+       tscadc_idle_config(tscadc_dev);
+       tscadc_writel(tscadc_dev, REG_SE, STPENB_STEPENB);
+       restore = tscadc_readl(tscadc_dev, REG_CTRL);
+       tscadc_writel(tscadc_dev, REG_CTRL,
+                       (restore | CNTRLREG_TSCSSENB));
+
+       return 0;
+}
+
+static const struct dev_pm_ops tscadc_pm_ops = {
+       .suspend = tscadc_suspend,
+       .resume = tscadc_resume,
+};
+#define TSCADC_PM_OPS (&tscadc_pm_ops)
+#else
+#define TSCADC_PM_OPS NULL
+#endif
+
+static struct platform_driver ti_tscadc_driver = {
+       .driver = {
+               .name   = "ti_tscadc",
+               .owner  = THIS_MODULE,
+               .pm     = TSCADC_PM_OPS,
+       },
+       .probe  = ti_tscadc_probe,
+       .remove = __devexit_p(ti_tscadc_remove),
+
+};
+
+module_platform_driver(ti_tscadc_driver);
+
+MODULE_DESCRIPTION("TI touchscreen / ADC MFD controller driver");
+MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
+MODULE_LICENSE("GPL");
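
Note: a worked example of the CLKDIV math in ti_tscadc_probe() above, assuming adc_tsc_fck runs at 24 MHz (a common AM335x rate, not stated in this patch) and ADC_CLK is the 3 MHz target:

	clock_rate = clk_get_rate(clk);		/* assume 24000000 */
	clk_value  = clock_rate / ADC_CLK;	/* 24000000 / 3000000 = 8 */
	/* The divider register takes N - 1, so an 8:1 divide is
	 * programmed as 7. */
	tscadc_writel(tscadc, REG_CLKDIV, clk_value - 1);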
index 1b20349..409afa2 100644 (file)
@@ -86,9 +86,9 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct tps6507x_dev *tps6507x;
-       int ret = 0;
 
-       tps6507x = kzalloc(sizeof(struct tps6507x_dev), GFP_KERNEL);
+       tps6507x = devm_kzalloc(&i2c->dev, sizeof(struct tps6507x_dev),
+                               GFP_KERNEL);
        if (tps6507x == NULL)
                return -ENOMEM;
 
@@ -98,19 +98,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
        tps6507x->read_dev = tps6507x_i2c_read_device;
        tps6507x->write_dev = tps6507x_i2c_write_device;
 
-       ret = mfd_add_devices(tps6507x->dev, -1,
-                             tps6507x_devs, ARRAY_SIZE(tps6507x_devs),
-                             NULL, 0, NULL);
-
-       if (ret < 0)
-               goto err;
-
-       return ret;
-
-err:
-       mfd_remove_devices(tps6507x->dev);
-       kfree(tps6507x);
-       return ret;
+       return mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
+                              ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
 }
 
 static int tps6507x_i2c_remove(struct i2c_client *i2c)
@@ -118,8 +107,6 @@ static int tps6507x_i2c_remove(struct i2c_client *i2c)
        struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
 
        mfd_remove_devices(tps6507x->dev);
-       kfree(tps6507x);
-
        return 0;
 }
 
index 382a857..8d12a8e 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/i2c.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tps65090.h>
-#include <linux/regmap.h>
 #include <linux/err.h>
 
 #define NUM_INT_REG 2
 #define TPS65090_INT_MSK       0x2
 #define TPS65090_INT_MSK2      0x3
 
-struct tps65090_irq_data {
-       u8              mask_reg;
-       u8              mask_pos;
-};
-
-#define TPS65090_IRQ(_reg, _mask_pos)          \
-       {                                       \
-               .mask_reg       = (_reg),       \
-               .mask_pos       = (_mask_pos),  \
-       }
-
-static const struct tps65090_irq_data tps65090_irqs[] = {
-       [0]             = TPS65090_IRQ(0, 0),
-       [1]             = TPS65090_IRQ(0, 1),
-       [2]             = TPS65090_IRQ(0, 2),
-       [3]             = TPS65090_IRQ(0, 3),
-       [4]             = TPS65090_IRQ(0, 4),
-       [5]             = TPS65090_IRQ(0, 5),
-       [6]             = TPS65090_IRQ(0, 6),
-       [7]             = TPS65090_IRQ(0, 7),
-       [8]             = TPS65090_IRQ(1, 0),
-       [9]             = TPS65090_IRQ(1, 1),
-       [10]            = TPS65090_IRQ(1, 2),
-       [11]            = TPS65090_IRQ(1, 3),
-       [12]            = TPS65090_IRQ(1, 4),
-       [13]            = TPS65090_IRQ(1, 5),
-       [14]            = TPS65090_IRQ(1, 6),
-       [15]            = TPS65090_IRQ(1, 7),
-};
+#define TPS65090_INT1_MASK_VAC_STATUS_CHANGE           1
+#define TPS65090_INT1_MASK_VSYS_STATUS_CHANGE          2
+#define TPS65090_INT1_MASK_BAT_STATUS_CHANGE           3
+#define TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE      4
+#define TPS65090_INT1_MASK_CHARGING_COMPLETE           5
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC1              6
+#define TPS65090_INT1_MASK_OVERLOAD_DCDC2              7
+#define TPS65090_INT2_MASK_OVERLOAD_DCDC3              0
+#define TPS65090_INT2_MASK_OVERLOAD_FET1               1
+#define TPS65090_INT2_MASK_OVERLOAD_FET2               2
+#define TPS65090_INT2_MASK_OVERLOAD_FET3               3
+#define TPS65090_INT2_MASK_OVERLOAD_FET4               4
+#define TPS65090_INT2_MASK_OVERLOAD_FET5               5
+#define TPS65090_INT2_MASK_OVERLOAD_FET6               6
+#define TPS65090_INT2_MASK_OVERLOAD_FET7               7
 
 static struct mfd_cell tps65090s[] = {
        {
                .name = "tps65090-pmic",
        },
        {
-               .name = "tps65090-regulator",
+               .name = "tps65090-charger",
        },
 };
 
-int tps65090_write(struct device *dev, int reg, uint8_t val)
-{
-       struct tps65090 *tps = dev_get_drvdata(dev);
-       return regmap_write(tps->rmap, reg, val);
-}
-EXPORT_SYMBOL_GPL(tps65090_write);
-
-int tps65090_read(struct device *dev, int reg, uint8_t *val)
-{
-       struct tps65090 *tps = dev_get_drvdata(dev);
-       unsigned int temp_val;
-       int ret;
-       ret = regmap_read(tps->rmap, reg, &temp_val);
-       if (!ret)
-               *val = temp_val;
-       return ret;
-}
-EXPORT_SYMBOL_GPL(tps65090_read);
-
-int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num)
-{
-       struct tps65090 *tps = dev_get_drvdata(dev);
-       return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_set_bits);
-
-int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num)
-{
-       struct tps65090 *tps = dev_get_drvdata(dev);
-       return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
-}
-EXPORT_SYMBOL_GPL(tps65090_clr_bits);
-
-static void tps65090_irq_lock(struct irq_data *data)
-{
-       struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
-       mutex_lock(&tps65090->irq_lock);
-}
-
-static void tps65090_irq_mask(struct irq_data *irq_data)
-{
-       struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
-       unsigned int __irq = irq_data->hwirq;
-       const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
-       tps65090_set_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
-               data->mask_pos);
-}
-
-static void tps65090_irq_unmask(struct irq_data *irq_data)
-{
-       struct tps65090 *tps65090 = irq_data_get_irq_chip_data(irq_data);
-       unsigned int __irq = irq_data->irq - tps65090->irq_base;
-       const struct tps65090_irq_data *data = &tps65090_irqs[__irq];
-
-       tps65090_clr_bits(tps65090->dev, (TPS65090_INT_MSK + data->mask_reg),
-               data->mask_pos);
-}
-
-static void tps65090_irq_sync_unlock(struct irq_data *data)
-{
-       struct tps65090 *tps65090 = irq_data_get_irq_chip_data(data);
-
-       mutex_unlock(&tps65090->irq_lock);
-}
-
-static irqreturn_t tps65090_irq(int irq, void *data)
-{
-       struct tps65090 *tps65090 = data;
-       int ret = 0;
-       u8 status, mask;
-       unsigned long int acks = 0;
-       int i;
-
-       for (i = 0; i < NUM_INT_REG; i++) {
-               ret = tps65090_read(tps65090->dev, TPS65090_INT_MSK + i, &mask);
-               if (ret < 0) {
-                       dev_err(tps65090->dev,
-                               "failed to read mask reg [addr:%d]\n",
-                               TPS65090_INT_MSK + i);
-                       return IRQ_NONE;
-               }
-               ret = tps65090_read(tps65090->dev, TPS65090_INT_STS + i,
-                       &status);
-               if (ret < 0) {
-                       dev_err(tps65090->dev,
-                               "failed to read status reg [addr:%d]\n",
-                                TPS65090_INT_STS + i);
-                       return IRQ_NONE;
-               }
-               if (status) {
-                       /* Ack only those interrupts which are not masked */
-                       status &= (~mask);
-                       ret = tps65090_write(tps65090->dev,
-                                       TPS65090_INT_STS + i, status);
-                       if (ret < 0) {
-                               dev_err(tps65090->dev,
-                                       "failed to write interrupt status\n");
-                               return IRQ_NONE;
-                       }
-                       acks |= (status << (i * 8));
-               }
-       }
-
-       for_each_set_bit(i, &acks, ARRAY_SIZE(tps65090_irqs))
-               handle_nested_irq(tps65090->irq_base + i);
-       return acks ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int tps65090_irq_init(struct tps65090 *tps65090, int irq,
-       int irq_base)
-{
-       int i, ret;
-
-       if (!irq_base) {
-               dev_err(tps65090->dev, "IRQ base not set\n");
-               return -EINVAL;
-       }
-
-       mutex_init(&tps65090->irq_lock);
-
-       for (i = 0; i < NUM_INT_REG; i++)
-               tps65090_write(tps65090->dev, TPS65090_INT_MSK + i, 0xFF);
-
-       for (i = 0; i < NUM_INT_REG; i++)
-               tps65090_write(tps65090->dev, TPS65090_INT_STS + i, 0xff);
-
-       tps65090->irq_base = irq_base;
-       tps65090->irq_chip.name = "tps65090";
-       tps65090->irq_chip.irq_mask = tps65090_irq_mask;
-       tps65090->irq_chip.irq_unmask = tps65090_irq_unmask;
-       tps65090->irq_chip.irq_bus_lock = tps65090_irq_lock;
-       tps65090->irq_chip.irq_bus_sync_unlock = tps65090_irq_sync_unlock;
-
-       for (i = 0; i < ARRAY_SIZE(tps65090_irqs); i++) {
-               int __irq = i + tps65090->irq_base;
-               irq_set_chip_data(__irq, tps65090);
-               irq_set_chip_and_handler(__irq, &tps65090->irq_chip,
-                                        handle_simple_irq);
-               irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
-               set_irq_flags(__irq, IRQF_VALID);
-#endif
-       }
-
-       ret = request_threaded_irq(irq, NULL, tps65090_irq, IRQF_ONESHOT,
-                               "tps65090", tps65090);
-       if (!ret) {
-               device_init_wakeup(tps65090->dev, 1);
-               enable_irq_wake(irq);
-       }
+static const struct regmap_irq tps65090_irqs[] = {
+       /* INT1 IRQs */
+       [TPS65090_IRQ_VAC_STATUS_CHANGE] = {
+                       .mask = TPS65090_INT1_MASK_VAC_STATUS_CHANGE,
+       },
+       [TPS65090_IRQ_VSYS_STATUS_CHANGE] = {
+                       .mask = TPS65090_INT1_MASK_VSYS_STATUS_CHANGE,
+       },
+       [TPS65090_IRQ_BAT_STATUS_CHANGE] = {
+                       .mask = TPS65090_INT1_MASK_BAT_STATUS_CHANGE,
+       },
+       [TPS65090_IRQ_CHARGING_STATUS_CHANGE] = {
+                       .mask = TPS65090_INT1_MASK_CHARGING_STATUS_CHANGE,
+       },
+       [TPS65090_IRQ_CHARGING_COMPLETE] = {
+                       .mask = TPS65090_INT1_MASK_CHARGING_COMPLETE,
+       },
+       [TPS65090_IRQ_OVERLOAD_DCDC1] = {
+                       .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC1,
+       },
+       [TPS65090_IRQ_OVERLOAD_DCDC2] = {
+                       .mask = TPS65090_INT1_MASK_OVERLOAD_DCDC2,
+       },
+       /* INT2 IRQs */
+       [TPS65090_IRQ_OVERLOAD_DCDC3] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_DCDC3,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET1] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET1,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET2] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET2,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET3] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET3,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET4] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET4,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET5] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET5,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET6] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET6,
+       },
+       [TPS65090_IRQ_OVERLOAD_FET7] = {
+                       .reg_offset = 1,
+                       .mask = TPS65090_INT2_MASK_OVERLOAD_FET7,
+       },
+};
 
-       return ret;
-}
+static struct regmap_irq_chip tps65090_irq_chip = {
+       .name = "tps65090",
+       .irqs = tps65090_irqs,
+       .num_irqs = ARRAY_SIZE(tps65090_irqs),
+       .num_regs = NUM_INT_REG,
+       .status_base = TPS65090_INT_STS,
+       .mask_base = TPS65090_INT_MSK,
+       .mask_invert = true,
+};
 
 static bool is_volatile_reg(struct device *dev, unsigned int reg)
 {
-       if (reg == TPS65090_INT_STS)
+       if ((reg == TPS65090_INT_STS) || (reg == TPS65090_INT_STS2))
                return true;
        else
                return false;
@@ -263,36 +160,36 @@ static int tps65090_i2c_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       tps65090 = devm_kzalloc(&client->dev, sizeof(struct tps65090),
-               GFP_KERNEL);
-       if (tps65090 == NULL)
+       tps65090 = devm_kzalloc(&client->dev, sizeof(*tps65090), GFP_KERNEL);
+       if (!tps65090) {
+               dev_err(&client->dev, "mem alloc for tps65090 failed\n");
                return -ENOMEM;
+       }
 
-       tps65090->client = client;
        tps65090->dev = &client->dev;
        i2c_set_clientdata(client, tps65090);
 
-       mutex_init(&tps65090->lock);
-
-       if (client->irq) {
-               ret = tps65090_irq_init(tps65090, client->irq, pdata->irq_base);
-               if (ret) {
-                       dev_err(&client->dev, "IRQ init failed with err: %d\n",
-                               ret);
-                       goto err_exit;
-               }
-       }
-
-       tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
-                                             &tps65090_regmap_config);
+       tps65090->rmap = devm_regmap_init_i2c(client, &tps65090_regmap_config);
        if (IS_ERR(tps65090->rmap)) {
                ret = PTR_ERR(tps65090->rmap);
                dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
-               goto err_irq_exit;
+               return ret;
+       }
+
+       if (client->irq) {
+               ret = regmap_add_irq_chip(tps65090->rmap, client->irq,
+                       IRQF_ONESHOT | IRQF_TRIGGER_LOW, pdata->irq_base,
+                       &tps65090_irq_chip, &tps65090->irq_data);
+               if (ret) {
+                       dev_err(&client->dev,
+                               "IRQ init failed with err: %d\n", ret);
+                       return ret;
+               }
        }
 
        ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
-                             ARRAY_SIZE(tps65090s), NULL, 0, NULL);
+               ARRAY_SIZE(tps65090s), NULL,
+               regmap_irq_chip_get_base(tps65090->irq_data), NULL);
        if (ret) {
                dev_err(&client->dev, "add mfd devices failed with err: %d\n",
                        ret);
@@ -303,8 +200,7 @@ static int tps65090_i2c_probe(struct i2c_client *client,
 
 err_irq_exit:
        if (client->irq)
-               free_irq(client->irq, tps65090);
-err_exit:
+               regmap_del_irq_chip(client->irq, tps65090->irq_data);
        return ret;
 }
 
@@ -314,7 +210,7 @@ static int tps65090_i2c_remove(struct i2c_client *client)
 
        mfd_remove_devices(tps65090->dev);
        if (client->irq)
-               free_irq(client->irq, tps65090);
+               regmap_del_irq_chip(client->irq, tps65090->irq_data);
 
        return 0;
 }
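
Note: the hand-rolled mask/unmask/handler code above collapses into a declarative regmap-irq table. A minimal sketch, with register offsets that are illustrative rather than taken from the TPS65090 datasheet:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* first status register */
	{ .reg_offset = 1, .mask = BIT(3) },	/* second status register */
};

static struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
	.num_regs	= 2,
	.status_base	= 0x00,			/* assumed register map */
	.mask_base	= 0x02,
	.mask_invert	= true,			/* bits enable, not mask */
};

/* One call replaces irq_init/irq_mask/irq_unmask and the threaded
 * handler; chip->irq_data then feeds regmap_irq_chip_get_base() when
 * the MFD cells are registered. */
static int example_irq_setup(struct example_chip *chip, int irq)
{
	return regmap_add_irq_chip(chip->rmap, irq,
				   IRQF_ONESHOT | IRQF_TRIGGER_LOW, 0,
				   &example_irq_chip, &chip->irq_data);
}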
index e14e252..b8f4864 100644 (file)
@@ -160,6 +160,7 @@ static int tps65217_probe(struct i2c_client *client,
        unsigned int version;
        unsigned int chip_id = ids->driver_data;
        const struct of_device_id *match;
+       bool status_off = false;
        int ret;
 
        if (client->dev.of_node) {
@@ -170,6 +171,8 @@ static int tps65217_probe(struct i2c_client *client,
                        return -EINVAL;
                }
                chip_id = (unsigned int)match->data;
+               status_off = of_property_read_bool(client->dev.of_node,
+                                       "ti,pmic-shutdown-controller");
        }
 
        if (!chip_id) {
@@ -207,6 +210,15 @@ static int tps65217_probe(struct i2c_client *client,
                return ret;
        }
 
+       /* Set the PMIC to shutdown on PWR_EN toggle */
+       if (status_off) {
+               ret = tps65217_set_bits(tps, TPS65217_REG_STATUS,
+                               TPS65217_STATUS_OFF, TPS65217_STATUS_OFF,
+                               TPS65217_PROTECT_NONE);
+               if (ret)
+                       dev_warn(tps->dev, "unable to set the status OFF\n");
+       }
+
        dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
                        (version & TPS65217_CHIPID_CHIP_MASK) >> 4,
                        version & TPS65217_CHIPID_REV_MASK);
index 87ba7ad..721b918 100644 (file)
 
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
 
 #include <linux/mfd/core.h>
@@ -92,6 +94,14 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
        [TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
 };
 
+static struct resource tps6586x_rtc_resources[] = {
+       {
+               .start  = TPS6586X_INT_RTC_ALM1,
+               .end    = TPS6586X_INT_RTC_ALM1,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
 static struct mfd_cell tps6586x_cell[] = {
        {
                .name = "tps6586x-gpio",
@@ -101,6 +111,8 @@ static struct mfd_cell tps6586x_cell[] = {
        },
        {
                .name = "tps6586x-rtc",
+               .num_resources = ARRAY_SIZE(tps6586x_rtc_resources),
+               .resources = &tps6586x_rtc_resources[0],
        },
        {
                .name = "tps6586x-onkey",
@@ -117,6 +129,7 @@ struct tps6586x {
        int                     irq_base;
        u32                     irq_en;
        u8                      mask_reg[5];
+       struct irq_domain       *irq_domain;
 };
 
 static inline struct tps6586x *dev_to_tps6586x(struct device *dev)
@@ -185,6 +198,14 @@ int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
 }
 EXPORT_SYMBOL_GPL(tps6586x_update);
 
+int tps6586x_irq_get_virq(struct device *dev, int irq)
+{
+       struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+       return irq_create_mapping(tps6586x->irq_domain, irq);
+}
+EXPORT_SYMBOL_GPL(tps6586x_irq_get_virq);
+
 static int __remove_subdev(struct device *dev, void *unused)
 {
        platform_device_unregister(to_platform_device(dev));
@@ -206,7 +227,7 @@ static void tps6586x_irq_lock(struct irq_data *data)
 static void tps6586x_irq_enable(struct irq_data *irq_data)
 {
        struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
-       unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+       unsigned int __irq = irq_data->hwirq;
        const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
        tps6586x->mask_reg[data->mask_reg] &= ~data->mask_mask;
@@ -217,7 +238,7 @@ static void tps6586x_irq_disable(struct irq_data *irq_data)
 {
        struct tps6586x *tps6586x = irq_data_get_irq_chip_data(irq_data);
 
-       unsigned int __irq = irq_data->irq - tps6586x->irq_base;
+       unsigned int __irq = irq_data->hwirq;
        const struct tps6586x_irq_data *data = &tps6586x_irqs[__irq];
 
        tps6586x->mask_reg[data->mask_reg] |= data->mask_mask;
@@ -240,6 +261,39 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
        mutex_unlock(&tps6586x->irq_lock);
 }
 
+static struct irq_chip tps6586x_irq_chip = {
+       .name = "tps6586x",
+       .irq_bus_lock = tps6586x_irq_lock,
+       .irq_bus_sync_unlock = tps6586x_irq_sync_unlock,
+       .irq_disable = tps6586x_irq_disable,
+       .irq_enable = tps6586x_irq_enable,
+};
+
+static int tps6586x_irq_map(struct irq_domain *h, unsigned int virq,
+                               irq_hw_number_t hw)
+{
+       struct tps6586x *tps6586x = h->host_data;
+
+       irq_set_chip_data(virq, tps6586x);
+       irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
+       irq_set_nested_thread(virq, 1);
+
+       /* ARM needs us to explicitly flag the IRQ as valid
+        * and will mark them noprobe when we do so. */
+#ifdef CONFIG_ARM
+       set_irq_flags(virq, IRQF_VALID);
+#else
+       irq_set_noprobe(virq);
+#endif
+
+       return 0;
+}
+
+static struct irq_domain_ops tps6586x_domain_ops = {
+       .map    = tps6586x_irq_map,
+       .xlate  = irq_domain_xlate_twocell,
+};
+
 static irqreturn_t tps6586x_irq(int irq, void *data)
 {
        struct tps6586x *tps6586x = data;
@@ -260,7 +314,8 @@ static irqreturn_t tps6586x_irq(int irq, void *data)
                int i = __ffs(acks);
 
                if (tps6586x->irq_en & (1 << i))
-                       handle_nested_irq(tps6586x->irq_base + i);
+                       handle_nested_irq(
+                               irq_find_mapping(tps6586x->irq_domain, i));
 
                acks &= ~(1 << i);
        }
@@ -273,11 +328,8 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
 {
        int i, ret;
        u8 tmp[4];
-
-       if (!irq_base) {
-               dev_warn(tps6586x->dev, "No interrupt support on IRQ base\n");
-               return -EINVAL;
-       }
+       int new_irq_base;
+       int irq_num = ARRAY_SIZE(tps6586x_irqs);
 
        mutex_init(&tps6586x->irq_lock);
        for (i = 0; i < 5; i++) {
@@ -287,25 +339,24 @@ static int tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
 
        tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, sizeof(tmp), tmp);
 
-       tps6586x->irq_base = irq_base;
-
-       tps6586x->irq_chip.name = "tps6586x";
-       tps6586x->irq_chip.irq_enable = tps6586x_irq_enable;
-       tps6586x->irq_chip.irq_disable = tps6586x_irq_disable;
-       tps6586x->irq_chip.irq_bus_lock = tps6586x_irq_lock;
-       tps6586x->irq_chip.irq_bus_sync_unlock = tps6586x_irq_sync_unlock;
-
-       for (i = 0; i < ARRAY_SIZE(tps6586x_irqs); i++) {
-               int __irq = i + tps6586x->irq_base;
-               irq_set_chip_data(__irq, tps6586x);
-               irq_set_chip_and_handler(__irq, &tps6586x->irq_chip,
-                                        handle_simple_irq);
-               irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
-               set_irq_flags(__irq, IRQF_VALID);
-#endif
+       if (irq_base > 0) {
+               new_irq_base = irq_alloc_descs(irq_base, 0, irq_num, -1);
+               if (new_irq_base < 0) {
+                       dev_err(tps6586x->dev,
+                               "Failed to alloc IRQs: %d\n", new_irq_base);
+                       return new_irq_base;
+               }
+       } else {
+               new_irq_base = 0;
        }
 
+       tps6586x->irq_domain = irq_domain_add_simple(tps6586x->dev->of_node,
+                               irq_num, new_irq_base, &tps6586x_domain_ops,
+                               tps6586x);
+       if (!tps6586x->irq_domain) {
+               dev_err(tps6586x->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
+       }
        ret = request_threaded_irq(irq, NULL, tps6586x_irq, IRQF_ONESHOT,
                                   "tps6586x", tps6586x);
 
@@ -461,7 +512,7 @@ static int tps6586x_i2c_probe(struct i2c_client *client,
 
        ret = mfd_add_devices(tps6586x->dev, -1,
                              tps6586x_cell, ARRAY_SIZE(tps6586x_cell),
-                             NULL, 0, NULL);
+                             NULL, 0, tps6586x->irq_domain);
        if (ret < 0) {
                dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
                goto err_mfd_add;
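
Note: because the domain is now handed to mfd_add_devices(), the RTC cell's IORESOURCE_IRQ arrives in the child already remapped; tps6586x_irq_get_virq() covers consumers that only know the hardware number. A sketch of the consumer side (example_* names are illustrative):

#include <linux/platform_device.h>

static int example_rtc_probe(struct platform_device *pdev)
{
	/* Already a virq: the MFD core ran the cell's hwirq through
	 * the parent's irq_domain before the child probed. */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* ... request_threaded_irq(irq, ...) as usual ... */
	return 0;
}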
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
deleted file mode 100644 (file)
index 09aab3e..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * tps65910-irq.c  --  TI TPS6591x
- *
- * Copyright 2010 Texas Instruments Inc.
- *
- * Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/gpio.h>
-#include <linux/mfd/tps65910.h>
-
-/*
- * This is a threaded IRQ handler so can access I2C/SPI.  Since all
- * interrupts are clear on read the IRQ line will be reasserted and
- * the physical IRQ will be handled again if another interrupt is
- * asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads.  We're also assuming that
- * it's rare to get lots of interrupts firing simultaneously so try to
- * minimise I/O.
- */
-static irqreturn_t tps65910_irq(int irq, void *irq_data)
-{
-       struct tps65910 *tps65910 = irq_data;
-       unsigned int reg;
-       u32 irq_sts;
-       u32 irq_mask;
-       int i;
-
-       tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
-       irq_sts = reg;
-       tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
-       irq_sts |= reg << 8;
-       switch (tps65910_chip_id(tps65910)) {
-       case TPS65911:
-               tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
-               irq_sts |= reg << 16;
-       }
-
-       tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
-       irq_mask = reg;
-       tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
-       irq_mask |= reg << 8;
-       switch (tps65910_chip_id(tps65910)) {
-       case TPS65911:
-               tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
-               irq_mask |= reg << 16;
-       }
-
-       irq_sts &= ~irq_mask;
-
-       if (!irq_sts)
-               return IRQ_NONE;
-
-       for (i = 0; i < tps65910->irq_num; i++) {
-
-               if (!(irq_sts & (1 << i)))
-                       continue;
-
-               handle_nested_irq(irq_find_mapping(tps65910->domain, i));
-       }
-
-       /* Write the STS register back to clear IRQs we handled */
-       reg = irq_sts & 0xFF;
-       irq_sts >>= 8;
-       tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
-       reg = irq_sts & 0xFF;
-       tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
-       switch (tps65910_chip_id(tps65910)) {
-       case TPS65911:
-               reg = irq_sts >> 8;
-               tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
-       }
-
-       return IRQ_HANDLED;
-}
-
-static void tps65910_irq_lock(struct irq_data *data)
-{
-       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
-       mutex_lock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_sync_unlock(struct irq_data *data)
-{
-       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-       u32 reg_mask;
-       unsigned int reg;
-
-       tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
-       reg_mask = reg;
-       tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
-       reg_mask |= reg << 8;
-       switch (tps65910_chip_id(tps65910)) {
-       case TPS65911:
-               tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
-               reg_mask |= reg << 16;
-       }
-
-       if (tps65910->irq_mask != reg_mask) {
-               reg = tps65910->irq_mask & 0xFF;
-               tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
-               reg = tps65910->irq_mask >> 8 & 0xFF;
-               tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
-               switch (tps65910_chip_id(tps65910)) {
-               case TPS65911:
-                       reg = tps65910->irq_mask >> 16;
-                       tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
-               }
-       }
-       mutex_unlock(&tps65910->irq_lock);
-}
-
-static void tps65910_irq_enable(struct irq_data *data)
-{
-       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
-       tps65910->irq_mask &= ~(1 << data->hwirq);
-}
-
-static void tps65910_irq_disable(struct irq_data *data)
-{
-       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-
-       tps65910->irq_mask |= (1 << data->hwirq);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tps65910_irq_set_wake(struct irq_data *data, unsigned int enable)
-{
-       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
-       return irq_set_irq_wake(tps65910->chip_irq, enable);
-}
-#else
-#define tps65910_irq_set_wake NULL
-#endif
-
-static struct irq_chip tps65910_irq_chip = {
-       .name = "tps65910",
-       .irq_bus_lock = tps65910_irq_lock,
-       .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
-       .irq_disable = tps65910_irq_disable,
-       .irq_enable = tps65910_irq_enable,
-       .irq_set_wake = tps65910_irq_set_wake,
-};
-
-static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
-                               irq_hw_number_t hw)
-{
-       struct tps65910 *tps65910 = h->host_data;
-
-       irq_set_chip_data(virq, tps65910);
-       irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
-       irq_set_nested_thread(virq, 1);
-
-       /* ARM needs us to explicitly flag the IRQ as valid
-        * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-       set_irq_flags(virq, IRQF_VALID);
-#else
-       irq_set_noprobe(virq);
-#endif
-
-       return 0;
-}
-
-static struct irq_domain_ops tps65910_domain_ops = {
-       .map    = tps65910_irq_map,
-       .xlate  = irq_domain_xlate_twocell,
-};
-
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
-                   struct tps65910_platform_data *pdata)
-{
-       int ret;
-       int flags = IRQF_ONESHOT;
-
-       if (!irq) {
-               dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
-               return -EINVAL;
-       }
-
-       if (!pdata) {
-               dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
-               return -EINVAL;
-       }
-
-       switch (tps65910_chip_id(tps65910)) {
-       case TPS65910:
-               tps65910->irq_num = TPS65910_NUM_IRQ;
-               break;
-       case TPS65911:
-               tps65910->irq_num = TPS65911_NUM_IRQ;
-               break;
-       }
-
-       if (pdata->irq_base > 0) {
-               pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
-                                       tps65910->irq_num, -1);
-               if (pdata->irq_base < 0) {
-                       dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
-                                       pdata->irq_base);
-                       return pdata->irq_base;
-               }
-       }
-
-       tps65910->irq_mask = 0xFFFFFF;
-
-       mutex_init(&tps65910->irq_lock);
-       tps65910->chip_irq = irq;
-       tps65910->irq_base = pdata->irq_base;
-
-       if (pdata->irq_base > 0)
-               tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
-                                       tps65910->irq_num,
-                                       pdata->irq_base,
-                                       0,
-                                       &tps65910_domain_ops, tps65910);
-       else
-               tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
-                                       tps65910->irq_num,
-                                       &tps65910_domain_ops, tps65910);
-
-       if (!tps65910->domain) {
-               dev_err(tps65910->dev, "Failed to create IRQ domain\n");
-               return -ENOMEM;
-       }
-
-       ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
-                                  "tps65910", tps65910);
-
-       irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
-
-       if (ret != 0)
-               dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
-
-       return ret;
-}
-
-int tps65910_irq_exit(struct tps65910 *tps65910)
-{
-       if (tps65910->chip_irq)
-               free_irq(tps65910->chip_irq, tps65910);
-       return 0;
-}
index ce05465..d792772 100644 (file)
@@ -19,6 +19,9 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/mfd/core.h>
 #include <linux/regmap.h>
 #include <linux/mfd/tps65910.h>
@@ -50,6 +53,219 @@ static struct mfd_cell tps65910s[] = {
 };
 
 
+static const struct regmap_irq tps65911_irqs[] = {
+       /* INT_STS */
+       [TPS65911_IRQ_PWRHOLD_F] = {
+               .mask = INT_MSK_PWRHOLD_F_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_VBAT_VMHI] = {
+               .mask = INT_MSK_VMBHI_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_PWRON] = {
+               .mask = INT_MSK_PWRON_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_PWRON_LP] = {
+               .mask = INT_MSK_PWRON_LP_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_PWRHOLD_R] = {
+               .mask = INT_MSK_PWRHOLD_R_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_HOTDIE] = {
+               .mask = INT_MSK_HOTDIE_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_RTC_ALARM] = {
+               .mask = INT_MSK_RTC_ALARM_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65911_IRQ_RTC_PERIOD] = {
+               .mask = INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+
+       /* INT_STS2 */
+       [TPS65911_IRQ_GPIO0_R] = {
+               .mask = INT_MSK2_GPIO0_R_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO0_F] = {
+               .mask = INT_MSK2_GPIO0_F_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO1_R] = {
+               .mask = INT_MSK2_GPIO1_R_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO1_F] = {
+               .mask = INT_MSK2_GPIO1_F_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO2_R] = {
+               .mask = INT_MSK2_GPIO2_R_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO2_F] = {
+               .mask = INT_MSK2_GPIO2_F_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO3_R] = {
+               .mask = INT_MSK2_GPIO3_R_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65911_IRQ_GPIO3_F] = {
+               .mask = INT_MSK2_GPIO3_F_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+
+       /* INT_STS3 */
+       [TPS65911_IRQ_GPIO4_R] = {
+               .mask = INT_MSK3_GPIO4_R_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_GPIO4_F] = {
+               .mask = INT_MSK3_GPIO4_F_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_GPIO5_R] = {
+               .mask = INT_MSK3_GPIO5_R_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_GPIO5_F] = {
+               .mask = INT_MSK3_GPIO5_F_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_WTCHDG] = {
+               .mask = INT_MSK3_WTCHDG_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_VMBCH2_H] = {
+               .mask = INT_MSK3_VMBCH2_H_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_VMBCH2_L] = {
+               .mask = INT_MSK3_VMBCH2_L_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+       [TPS65911_IRQ_PWRDN] = {
+               .mask = INT_MSK3_PWRDN_IT_MSK_MASK,
+               .reg_offset = 2,
+       },
+};
+
+static const struct regmap_irq tps65910_irqs[] = {
+       /* INT_STS */
+       [TPS65910_IRQ_VBAT_VMBDCH] = {
+               .mask = TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_VBAT_VMHI] = {
+               .mask = TPS65910_INT_MSK_VMBHI_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_PWRON] = {
+               .mask = TPS65910_INT_MSK_PWRON_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_PWRON_LP] = {
+               .mask = TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_PWRHOLD] = {
+               .mask = TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_HOTDIE] = {
+               .mask = TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_RTC_ALARM] = {
+               .mask = TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+       [TPS65910_IRQ_RTC_PERIOD] = {
+               .mask = TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK,
+               .reg_offset = 0,
+       },
+
+       /* INT_STS2 */
+       [TPS65910_IRQ_GPIO_R] = {
+               .mask = TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+       [TPS65910_IRQ_GPIO_F] = {
+               .mask = TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK,
+               .reg_offset = 1,
+       },
+};
+
+static struct regmap_irq_chip tps65911_irq_chip = {
+       .name = "tps65910",
+       .irqs = tps65911_irqs,
+       .num_irqs = ARRAY_SIZE(tps65911_irqs),
+       .num_regs = 3,
+       .irq_reg_stride = 2,
+       .status_base = TPS65910_INT_STS,
+       .mask_base = TPS65910_INT_MSK,
+       .ack_base = TPS65910_INT_STS,
+};
+
+static struct regmap_irq_chip tps65910_irq_chip = {
+       .name = "tps65910",
+       .irqs = tps65910_irqs,
+       .num_irqs = ARRAY_SIZE(tps65910_irqs),
+       .num_regs = 2,
+       .irq_reg_stride = 2,
+       .status_base = TPS65910_INT_STS,
+       .mask_base = TPS65910_INT_MSK,
+       .ack_base = TPS65910_INT_STS,
+};
+
+static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+                   struct tps65910_platform_data *pdata)
+{
+       int ret = 0;
+       static struct regmap_irq_chip *tps6591x_irqs_chip;
+
+       if (!irq) {
+               dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+               return -EINVAL;
+       }
+
+       if (!pdata) {
+               dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
+               return -EINVAL;
+       }
+
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65910:
+               tps6591x_irqs_chip = &tps65910_irq_chip;
+               break;
+       case TPS65911:
+               tps6591x_irqs_chip = &tps65911_irq_chip;
+               break;
+       }
+
+       tps65910->chip_irq = irq;
+       ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
+               IRQF_ONESHOT, pdata->irq_base,
+               tps6591x_irqs_chip, &tps65910->irq_data);
+       if (ret < 0)
+               dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
+       return ret;
+}
+
+static int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+       if (tps65910->chip_irq > 0)
+               regmap_del_irq_chip(tps65910->chip_irq, tps65910->irq_data);
+       return 0;
+}
+
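For context, this is how a sub-driver would typically consume the IRQ domain registered above; a minimal sketch of the usual regmap-irq consumer pattern (the function name and handler are illustrative, not part of this patch):

/* Hypothetical consumer: map the chip-relative RTC alarm interrupt
 * to a Linux virq via the regmap IRQ domain, then request it. */
static int tps65910_request_alarm_irq(struct tps65910 *tps65910,
				      irq_handler_t handler, void *data)
{
	int virq = regmap_irq_get_virq(tps65910->irq_data,
				       TPS65910_IRQ_RTC_ALARM);

	if (virq < 0)
		return virq;	/* IRQ domain not initialized */

	return request_threaded_irq(virq, NULL, handler, IRQF_ONESHOT,
				    "tps65910-rtc-alarm", data);
}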
 static bool is_volatile_reg(struct device *dev, unsigned int reg)
 {
        struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -270,7 +486,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
        tps65910->dev = &i2c->dev;
        tps65910->i2c_client = i2c;
        tps65910->id = chip_id;
-       mutex_init(&tps65910->io_mutex);
 
        tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
        if (IS_ERR(tps65910->regmap)) {
@@ -279,14 +494,6 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
-       ret = mfd_add_devices(tps65910->dev, -1,
-                             tps65910s, ARRAY_SIZE(tps65910s),
-                             NULL, 0, NULL);
-       if (ret < 0) {
-               dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
-               return ret;
-       }
-
        init_data->irq = pmic_plat_data->irq;
        init_data->irq_base = pmic_plat_data->irq_base;
 
@@ -299,6 +506,15 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
                pm_power_off = tps65910_power_off;
        }
 
+       ret = mfd_add_devices(tps65910->dev, -1,
+                             tps65910s, ARRAY_SIZE(tps65910s),
+                             NULL, 0,
+                             regmap_irq_get_domain(tps65910->irq_data));
+       if (ret < 0) {
+               dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+               return ret;
+       }
+
        return ret;
 }
 
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
new file mode 100644 (file)
index 0000000..10b51f7
--- /dev/null
@@ -0,0 +1,574 @@
+/*
+ * tps80031.c -- TI TPS80031/TPS80032 mfd core driver.
+ *
+ * MFD core driver for TI TPS80031/TPS80032 Fully Integrated
+ * Power Management with Power Path and Battery Charger
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps80031.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static struct resource tps80031_rtc_resources[] = {
+       {
+               .start = TPS80031_INT_RTC_ALARM,
+               .end = TPS80031_INT_RTC_ALARM,
+               .flags = IORESOURCE_IRQ,
+       },
+};
+
+/* TPS80031 sub mfd devices */
+static struct mfd_cell tps80031_cell[] = {
+       {
+               .name = "tps80031-pmic",
+       },
+       {
+               .name = "tps80031-clock",
+       },
+       {
+               .name = "tps80031-rtc",
+               .num_resources = ARRAY_SIZE(tps80031_rtc_resources),
+               .resources = tps80031_rtc_resources,
+       },
+       {
+               .name = "tps80031-gpadc",
+       },
+       {
+               .name = "tps80031-fuel-gauge",
+       },
+       {
+               .name = "tps80031-charger",
+       },
+};
+
+static int tps80031_slave_address[TPS80031_NUM_SLAVES] = {
+       TPS80031_I2C_ID0_ADDR,
+       TPS80031_I2C_ID1_ADDR,
+       TPS80031_I2C_ID2_ADDR,
+       TPS80031_I2C_ID3_ADDR,
+};
+
+struct tps80031_pupd_data {
+       u8      reg;
+       u8      pullup_bit;
+       u8      pulldown_bit;
+};
+
+#define TPS80031_IRQ(_reg, _mask)                      \
+       {                                                       \
+               .reg_offset = (TPS80031_INT_MSK_LINE_##_reg) -  \
+                               TPS80031_INT_MSK_LINE_A,        \
+               .mask = BIT(_mask),                             \
+       }
+
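Each entry in the table below is generated by this macro; assuming the three INT_MSK_LINE registers are laid out consecutively, the watchdog entry, for example, expands to:

[TPS80031_INT_WATCHDOG] = {
	.reg_offset = 1,	/* INT_MSK_LINE_B - INT_MSK_LINE_A */
	.mask = BIT(0),
},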
+static const struct regmap_irq tps80031_main_irqs[] = {
+       [TPS80031_INT_PWRON]            = TPS80031_IRQ(A, 0),
+       [TPS80031_INT_RPWRON]           = TPS80031_IRQ(A, 1),
+       [TPS80031_INT_SYS_VLOW]         = TPS80031_IRQ(A, 2),
+       [TPS80031_INT_RTC_ALARM]        = TPS80031_IRQ(A, 3),
+       [TPS80031_INT_RTC_PERIOD]       = TPS80031_IRQ(A, 4),
+       [TPS80031_INT_HOT_DIE]          = TPS80031_IRQ(A, 5),
+       [TPS80031_INT_VXX_SHORT]        = TPS80031_IRQ(A, 6),
+       [TPS80031_INT_SPDURATION]       = TPS80031_IRQ(A, 7),
+       [TPS80031_INT_WATCHDOG]         = TPS80031_IRQ(B, 0),
+       [TPS80031_INT_BAT]              = TPS80031_IRQ(B, 1),
+       [TPS80031_INT_SIM]              = TPS80031_IRQ(B, 2),
+       [TPS80031_INT_MMC]              = TPS80031_IRQ(B, 3),
+       [TPS80031_INT_RES]              = TPS80031_IRQ(B, 4),
+       [TPS80031_INT_GPADC_RT]         = TPS80031_IRQ(B, 5),
+       [TPS80031_INT_GPADC_SW2_EOC]    = TPS80031_IRQ(B, 6),
+       [TPS80031_INT_CC_AUTOCAL]       = TPS80031_IRQ(B, 7),
+       [TPS80031_INT_ID_WKUP]          = TPS80031_IRQ(C, 0),
+       [TPS80031_INT_VBUSS_WKUP]       = TPS80031_IRQ(C, 1),
+       [TPS80031_INT_ID]               = TPS80031_IRQ(C, 2),
+       [TPS80031_INT_VBUS]             = TPS80031_IRQ(C, 3),
+       [TPS80031_INT_CHRG_CTRL]        = TPS80031_IRQ(C, 4),
+       [TPS80031_INT_EXT_CHRG]         = TPS80031_IRQ(C, 5),
+       [TPS80031_INT_INT_CHRG]         = TPS80031_IRQ(C, 6),
+       [TPS80031_INT_RES2]             = TPS80031_IRQ(C, 7),
+};
+
+static struct regmap_irq_chip tps80031_irq_chip = {
+       .name = "tps80031",
+       .irqs = tps80031_main_irqs,
+       .num_irqs = ARRAY_SIZE(tps80031_main_irqs),
+       .num_regs = 3,
+       .status_base = TPS80031_INT_STS_A,
+       .mask_base = TPS80031_INT_MSK_LINE_A,
+};
+
+#define PUPD_DATA(_reg, _pulldown_bit, _pullup_bit)    \
+       {                                               \
+               .reg = TPS80031_CFG_INPUT_PUPD##_reg,   \
+               .pulldown_bit = _pulldown_bit,          \
+               .pullup_bit = _pullup_bit,              \
+       }
+
+static const struct tps80031_pupd_data tps80031_pupds[] = {
+       [TPS80031_PREQ1]                = PUPD_DATA(1, BIT(0),  BIT(1)),
+       [TPS80031_PREQ2A]               = PUPD_DATA(1, BIT(2),  BIT(3)),
+       [TPS80031_PREQ2B]               = PUPD_DATA(1, BIT(4),  BIT(5)),
+       [TPS80031_PREQ2C]               = PUPD_DATA(1, BIT(6),  BIT(7)),
+       [TPS80031_PREQ3]                = PUPD_DATA(2, BIT(0),  BIT(1)),
+       [TPS80031_NRES_WARM]            = PUPD_DATA(2, 0,       BIT(2)),
+       [TPS80031_PWM_FORCE]            = PUPD_DATA(2, BIT(5),  0),
+       [TPS80031_CHRG_EXT_CHRG_STATZ]  = PUPD_DATA(2, 0,       BIT(6)),
+       [TPS80031_SIM]                  = PUPD_DATA(3, BIT(0),  BIT(1)),
+       [TPS80031_MMC]                  = PUPD_DATA(3, BIT(2),  BIT(3)),
+       [TPS80031_GPADC_START]          = PUPD_DATA(3, BIT(4),  0),
+       [TPS80031_DVSI2C_SCL]           = PUPD_DATA(4, 0,       BIT(0)),
+       [TPS80031_DVSI2C_SDA]           = PUPD_DATA(4, 0,       BIT(1)),
+       [TPS80031_CTLI2C_SCL]           = PUPD_DATA(4, 0,       BIT(2)),
+       [TPS80031_CTLI2C_SDA]           = PUPD_DATA(4, 0,       BIT(3)),
+};
+
+static struct tps80031 *tps80031_power_off_dev;
+
+int tps80031_ext_power_req_config(struct device *dev,
+               unsigned long ext_ctrl_flag, int preq_bit,
+               int state_reg_add, int trans_reg_add)
+{
+       u8 res_ass_reg = 0;
+       int preq_mask_bit = 0;
+       int ret;
+
+       if (!(ext_ctrl_flag & TPS80031_EXT_PWR_REQ))
+               return 0;
+
+       if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ1) {
+               res_ass_reg = TPS80031_PREQ1_RES_ASS_A + (preq_bit >> 3);
+               preq_mask_bit = 5;
+       } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ2) {
+               res_ass_reg = TPS80031_PREQ2_RES_ASS_A + (preq_bit >> 3);
+               preq_mask_bit = 6;
+       } else if (ext_ctrl_flag & TPS80031_PWR_REQ_INPUT_PREQ3) {
+               res_ass_reg = TPS80031_PREQ3_RES_ASS_A + (preq_bit >> 3);
+               preq_mask_bit = 7;
+       }
+
+       /* Configure the PREQx resource-assignment (RES_ASS) registers */
+       ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1, res_ass_reg,
+                                       BIT(preq_bit & 0x7));
+       if (ret < 0) {
+               dev_err(dev, "reg 0x%02x setbit failed, err = %d\n",
+                               res_ass_reg, ret);
+               return ret;
+       }
+
+       /* Unmask the PREQ */
+       ret = tps80031_clr_bits(dev, TPS80031_SLAVE_ID1,
+                       TPS80031_PHOENIX_MSK_TRANSITION, BIT(preq_mask_bit));
+       if (ret < 0) {
+               dev_err(dev, "reg 0x%02x clrbit failed, err = %d\n",
+                       TPS80031_PHOENIX_MSK_TRANSITION, ret);
+               return ret;
+       }
+
+       /* Switch regulator control to resource now */
+       if (ext_ctrl_flag & (TPS80031_PWR_REQ_INPUT_PREQ2 |
+                                       TPS80031_PWR_REQ_INPUT_PREQ3)) {
+               ret = tps80031_update(dev, TPS80031_SLAVE_ID1, state_reg_add,
+                                               0x0, TPS80031_STATE_MASK);
+               if (ret < 0)
+                       dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+                               state_reg_add, ret);
+       } else {
+               ret = tps80031_update(dev, TPS80031_SLAVE_ID1, trans_reg_add,
+                               TPS80031_TRANS_SLEEP_OFF,
+                               TPS80031_TRANS_SLEEP_MASK);
+               if (ret < 0)
+                       dev_err(dev, "reg 0x%02x update failed, err = %d\n",
+                               trans_reg_add, ret);
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tps80031_ext_power_req_config);
+
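A minimal sketch of a call site, assuming a tps80031 regulator sub-driver wants its rail gated by the PREQ1 input; the preq bit and register addresses would come from that rail's configuration (the field names below are placeholders, not part of this patch):

ret = tps80031_ext_power_req_config(parent_dev,
		TPS80031_EXT_PWR_REQ | TPS80031_PWR_REQ_INPUT_PREQ1,
		ri->preq_bit,		/* placeholder */
		ri->state_reg_add,	/* placeholder */
		ri->trans_reg_add);	/* placeholder */
if (ret < 0)
	dev_err(parent_dev, "PREQ1 gating setup failed: %d\n", ret);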
+static void tps80031_power_off(void)
+{
+       dev_info(tps80031_power_off_dev->dev, "switching off PMU\n");
+       tps80031_write(tps80031_power_off_dev->dev, TPS80031_SLAVE_ID1,
+                               TPS80031_PHOENIX_DEV_ON, TPS80031_DEVOFF);
+}
+
+static void tps80031_pupd_init(struct tps80031 *tps80031,
+                              struct tps80031_platform_data *pdata)
+{
+       struct tps80031_pupd_init_data *pupd_init_data = pdata->pupd_init_data;
+       int data_size = pdata->pupd_init_data_size;
+       int i;
+
+       for (i = 0; i < data_size; ++i) {
+               struct tps80031_pupd_init_data *pupd_init = &pupd_init_data[i];
+               const struct tps80031_pupd_data *pupd =
+                       &tps80031_pupds[pupd_init->input_pin];
+               u8 update_value = 0;
+               u8 update_mask = pupd->pulldown_bit | pupd->pullup_bit;
+
+               if (pupd_init->setting == TPS80031_PUPD_PULLDOWN)
+                       update_value = pupd->pulldown_bit;
+               else if (pupd_init->setting == TPS80031_PUPD_PULLUP)
+                       update_value = pupd->pullup_bit;
+
+               tps80031_update(tps80031->dev, TPS80031_SLAVE_ID1, pupd->reg,
+                               update_value, update_mask);
+       }
+}
+
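For illustration, a board file would feed this loop with entries like the following (the array name is illustrative; the fields match the tps80031_pupd_init_data usage above):

static struct tps80031_pupd_init_data board_pupds[] = {
	{ .input_pin = TPS80031_PREQ1, .setting = TPS80031_PUPD_PULLUP },
	{ .input_pin = TPS80031_MMC,   .setting = TPS80031_PUPD_PULLDOWN },
};

/* hooked up via platform data:
 *	.pupd_init_data      = board_pupds,
 *	.pupd_init_data_size = ARRAY_SIZE(board_pupds),
 */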
+static int tps80031_init_ext_control(struct tps80031 *tps80031,
+                       struct tps80031_platform_data *pdata)
+{
+       struct device *dev = tps80031->dev;
+       int ret;
+       int i;
+
+       /* Clear all external control for this rail */
+       for (i = 0; i < 9; ++i) {
+               ret = tps80031_write(dev, TPS80031_SLAVE_ID1,
+                               TPS80031_PREQ1_RES_ASS_A + i, 0);
+               if (ret < 0) {
+                       dev_err(dev, "reg 0x%02x write failed, err = %d\n",
+                               TPS80031_PREQ1_RES_ASS_A + i, ret);
+                       return ret;
+               }
+       }
+
+       /* Mask the PREQ */
+       ret = tps80031_set_bits(dev, TPS80031_SLAVE_ID1,
+                       TPS80031_PHOENIX_MSK_TRANSITION, 0x7 << 5);
+       if (ret < 0) {
+               dev_err(dev, "reg 0x%02x set_bits failed, err = %d\n",
+                       TPS80031_PHOENIX_MSK_TRANSITION, ret);
+               return ret;
+       }
+       return ret;
+}
+
+static int __devinit tps80031_irq_init(struct tps80031 *tps80031, int irq,
+                               int irq_base)
+{
+       struct device *dev = tps80031->dev;
+       int i, ret;
+
+       /*
+        * The INT_MSK_STS registers control whether the status registers
+        * are updated when an interrupt occurs, and the INT_MSK_LINE
+        * registers control whether that status is propagated to the
+        * actual interrupt line.  As per the datasheet:
+        * When INT_MSK_LINE [i] is set to 1, the associated interrupt
+        * number i is INT line masked, which means that no interrupt is
+        * generated on the INT line.
+        * When INT_MSK_LINE [i] is set to 0, the associated interrupt
+        * number i is line enabled: An interrupt is generated on the
+        * INT line.
+        * In any case, the INT_STS [i] status bit may or may not be updated,
+        * depending only on the INT_MSK_STS [i] configuration register bit.
+        *
+        * When INT_MSK_STS [i] is set to 1, the associated interrupt number
+        * i is status masked, which means that no interrupt is stored in
+        * the INT_STS [i] status bit. Note that no interrupt number i is
+        * generated on the INT line, even if the INT_MSK_LINE [i] register
+        * bit is set to 0.
+        * When INT_MSK_STS [i] is set to 0, the associated interrupt number i
+        * is status enabled: An interrupt status is updated in the INT_STS [i]
+        * register. The interrupt may or may not be generated on the INT line,
+        * depending on the INT_MSK_LINE [i] configuration register bit.
+        */
+       for (i = 0; i < 3; i++)
+               tps80031_write(dev, TPS80031_SLAVE_ID2,
+                               TPS80031_INT_MSK_STS_A + i, 0x00);
+
+       ret = regmap_add_irq_chip(tps80031->regmap[TPS80031_SLAVE_ID2], irq,
+                       IRQF_ONESHOT, irq_base,
+                       &tps80031_irq_chip, &tps80031->irq_data);
+       if (ret < 0) {
+               dev_err(dev, "add irq failed, err = %d\n", ret);
+               return ret;
+       }
+       return ret;
+}
+
+static bool rd_wr_reg_id0(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS80031_SMPS1_CFG_FORCE ... TPS80031_SMPS2_CFG_VOLTAGE:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool rd_wr_reg_id1(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS80031_SECONDS_REG ... TPS80031_RTC_RESET_STATUS_REG:
+       case TPS80031_VALIDITY0 ... TPS80031_VALIDITY7:
+       case TPS80031_PHOENIX_START_CONDITION ... TPS80031_KEY_PRESS_DUR_CFG:
+       case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+       case TPS80031_BROADCAST_ADDR_ALL ... TPS80031_BROADCAST_ADDR_CLK_RST:
+       case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+       case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+       case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+       case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+       case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+       case TPS80031_BACKUP_REG:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool is_volatile_reg_id1(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS80031_SMPS4_CFG_TRANS ... TPS80031_SMPS3_CFG_VOLTAGE:
+       case TPS80031_VANA_CFG_TRANS ... TPS80031_LDO7_CFG_VOLTAGE:
+       case TPS80031_REGEN1_CFG_TRANS ... TPS80031_TMP_CFG_STATE:
+       case TPS80031_PREQ1_RES_ASS_A ... TPS80031_PREQ3_RES_ASS_C:
+       case TPS80031_SMPS_OFFSET ... TPS80031_BATDEBOUNCING:
+       case TPS80031_CFG_INPUT_PUPD1 ... TPS80031_CFG_SMPS_PD:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool rd_wr_reg_id2(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS80031_USB_VENDOR_ID_LSB ... TPS80031_USB_OTG_REVISION:
+       case TPS80031_GPADC_CTRL ... TPS80031_CTRL_P1:
+       case TPS80031_RTCH0_LSB ... TPS80031_GPCH0_MSB:
+       case TPS80031_TOGGLE1 ... TPS80031_VIBMODE:
+       case TPS80031_PWM1ON ... TPS80031_PWM2OFF:
+       case TPS80031_FG_REG_00 ... TPS80031_FG_REG_11:
+       case TPS80031_INT_STS_A ... TPS80031_INT_MSK_STS_C:
+       case TPS80031_CONTROLLER_CTRL2 ... TPS80031_LED_PWM_CTRL2:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool rd_wr_reg_id3(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case TPS80031_GPADC_TRIM0 ... TPS80031_GPADC_TRIM18:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config tps80031_regmap_configs[] = {
+       {
+               .reg_bits = 8,
+               .val_bits = 8,
+               .writeable_reg = rd_wr_reg_id0,
+               .readable_reg = rd_wr_reg_id0,
+               .max_register = TPS80031_MAX_REGISTER,
+       },
+       {
+               .reg_bits = 8,
+               .val_bits = 8,
+               .writeable_reg = rd_wr_reg_id1,
+               .readable_reg = rd_wr_reg_id1,
+               .volatile_reg = is_volatile_reg_id1,
+               .max_register = TPS80031_MAX_REGISTER,
+       },
+       {
+               .reg_bits = 8,
+               .val_bits = 8,
+               .writeable_reg = rd_wr_reg_id2,
+               .readable_reg = rd_wr_reg_id2,
+               .max_register = TPS80031_MAX_REGISTER,
+       },
+       {
+               .reg_bits = 8,
+               .val_bits = 8,
+               .writeable_reg = rd_wr_reg_id3,
+               .readable_reg = rd_wr_reg_id3,
+               .max_register = TPS80031_MAX_REGISTER,
+       },
+};
+
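These per-slave access tables let regmap reject out-of-window accesses before they reach the bus; a sketch, assuming a TPS80031_SLAVE_ID0 index exists alongside ID1-ID3:

/* GPADC trim registers live behind slave ID3, so regmap refuses
 * them on slave ID0 (rd_wr_reg_id0() returns false for them): */
ret = regmap_write(tps80031->regmap[TPS80031_SLAVE_ID0],
		   TPS80031_GPADC_TRIM0, 0x00);
/* ret == -EIO: register not writeable through this slave */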
+static int __devinit tps80031_probe(struct i2c_client *client,
+                                       const struct i2c_device_id *id)
+{
+       struct tps80031_platform_data *pdata = client->dev.platform_data;
+       struct tps80031 *tps80031;
+       int ret;
+       uint8_t es_version;
+       uint8_t ep_ver;
+       int i;
+
+       if (!pdata) {
+               dev_err(&client->dev, "tps80031 requires platform data\n");
+               return -EINVAL;
+       }
+
+       tps80031 = devm_kzalloc(&client->dev, sizeof(*tps80031), GFP_KERNEL);
+       if (!tps80031) {
+               dev_err(&client->dev, "Malloc failed for tps80031\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+               if (tps80031_slave_address[i] == client->addr)
+                       tps80031->clients[i] = client;
+               else
+                       tps80031->clients[i] = i2c_new_dummy(client->adapter,
+                                               tps80031_slave_address[i]);
+               if (!tps80031->clients[i]) {
+                       dev_err(&client->dev, "can't attach client %d\n", i);
+                       ret = -ENOMEM;
+                       goto fail_client_reg;
+               }
+
+               i2c_set_clientdata(tps80031->clients[i], tps80031);
+               tps80031->regmap[i] = devm_regmap_init_i2c(tps80031->clients[i],
+                                       &tps80031_regmap_configs[i]);
+               if (IS_ERR(tps80031->regmap[i])) {
+                       ret = PTR_ERR(tps80031->regmap[i]);
+                       dev_err(&client->dev,
+                               "regmap %d init failed, err %d\n", i, ret);
+                       goto fail_client_reg;
+               }
+       }
+
+       ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+                       TPS80031_JTAGVERNUM, &es_version);
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "Silicon version number read failed: %d\n", ret);
+               goto fail_client_reg;
+       }
+
+       ret = tps80031_read(&client->dev, TPS80031_SLAVE_ID3,
+                       TPS80031_EPROM_REV, &ep_ver);
+       if (ret < 0) {
+               dev_err(&client->dev,
+                       "Silicon eeprom version read failed: %d\n", ret);
+               goto fail_client_reg;
+       }
+
+       dev_info(&client->dev, "ES version 0x%02x and EPROM version 0x%02x\n",
+                                       es_version, ep_ver);
+       tps80031->es_version = es_version;
+       tps80031->dev = &client->dev;
+       i2c_set_clientdata(client, tps80031);
+       tps80031->chip_info = id->driver_data;
+
+       ret = tps80031_irq_init(tps80031, client->irq, pdata->irq_base);
+       if (ret) {
+               dev_err(&client->dev, "IRQ init failed: %d\n", ret);
+               goto fail_client_reg;
+       }
+
+       tps80031_pupd_init(tps80031, pdata);
+
+       tps80031_init_ext_control(tps80031, pdata);
+
+       ret = mfd_add_devices(tps80031->dev, -1,
+                       tps80031_cell, ARRAY_SIZE(tps80031_cell),
+                       NULL, 0,
+                       regmap_irq_get_domain(tps80031->irq_data));
+       if (ret < 0) {
+               dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
+               goto fail_mfd_add;
+       }
+
+       if (pdata->use_power_off && !pm_power_off) {
+               tps80031_power_off_dev = tps80031;
+               pm_power_off = tps80031_power_off;
+       }
+       return 0;
+
+fail_mfd_add:
+       regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+fail_client_reg:
+       for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+               if (tps80031->clients[i] && (tps80031->clients[i] != client))
+                       i2c_unregister_device(tps80031->clients[i]);
+       }
+       return ret;
+}
+
+static int __devexit tps80031_remove(struct i2c_client *client)
+{
+       struct tps80031 *tps80031 = i2c_get_clientdata(client);
+       int i;
+
+       if (tps80031_power_off_dev == tps80031) {
+               tps80031_power_off_dev = NULL;
+               pm_power_off = NULL;
+       }
+
+       mfd_remove_devices(tps80031->dev);
+
+       regmap_del_irq_chip(client->irq, tps80031->irq_data);
+
+       for (i = 0; i < TPS80031_NUM_SLAVES; i++) {
+               if (tps80031->clients[i] != client)
+                       i2c_unregister_device(tps80031->clients[i]);
+       }
+       return 0;
+}
+
+static const struct i2c_device_id tps80031_id_table[] = {
+       { "tps80031", TPS80031 },
+       { "tps80032", TPS80032 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tps80031_id_table);
+
+static struct i2c_driver tps80031_driver = {
+       .driver = {
+               .name   = "tps80031",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = tps80031_probe,
+       .remove         = __devexit_p(tps80031_remove),
+       .id_table       = tps80031_id_table,
+};
+
+static int __init tps80031_init(void)
+{
+       return i2c_add_driver(&tps80031_driver);
+}
+subsys_initcall(tps80031_init);
+
+static void __exit tps80031_exit(void)
+{
+       i2c_del_driver(&tps80031_driver);
+}
+module_exit(tps80031_exit);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("TPS80031 core driver");
+MODULE_LICENSE("GPL v2");
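Taken together, a board would wire this driver up roughly as follows; the I2C address, IRQ, and irq_base are illustrative values, not taken from this patch:

static struct tps80031_platform_data board_pmic_pdata = {
	.irq_base	= BOARD_PMIC_IRQ_BASE,	/* board-chosen */
	.use_power_off	= true,
};

static struct i2c_board_info board_pmic_info __initdata = {
	I2C_BOARD_INFO("tps80031", 0x48),	/* illustrative address */
	.irq		= BOARD_PMIC_IRQ,	/* illustrative IRQ */
	.platform_data	= &board_pmic_pdata,
};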
index 11b76c0..4f3baad 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/device.h>
@@ -65,9 +66,6 @@
 
 /* Triton Core internal information (BEGIN) */
 
-/* Last - for index max*/
-#define TWL4030_MODULE_LAST            TWL4030_MODULE_SECURED_REG
-
 #define TWL_NUM_SLAVES         4
 
 #define SUB_CHIP_ID0 0
@@ -171,13 +169,7 @@ EXPORT_SYMBOL(twl_rev);
 /* Structure for each TWL4030/TWL6030 Slave */
 struct twl_client {
        struct i2c_client *client;
-       u8 address;
-
-       /* max numb of i2c_msg required is for read =2 */
-       struct i2c_msg xfer_msg[2];
-
-       /* To lock access to xfer_msg */
-       struct mutex xfer_lock;
+       struct regmap *regmap;
 };
 
 static struct twl_client twl_modules[TWL_NUM_SLAVES];
@@ -189,7 +181,7 @@ struct twl_mapping {
 };
 static struct twl_mapping *twl_map;
 
-static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
+static struct twl_mapping twl4030_map[] = {
        /*
         * NOTE:  don't change this table without updating the
         * <linux/i2c/twl.h> defines for TWL4030_MODULE_*
@@ -197,34 +189,62 @@ static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = {
         */
 
        { 0, TWL4030_BASEADD_USB },
-
        { 1, TWL4030_BASEADD_AUDIO_VOICE },
        { 1, TWL4030_BASEADD_GPIO },
        { 1, TWL4030_BASEADD_INTBR },
        { 1, TWL4030_BASEADD_PIH },
-       { 1, TWL4030_BASEADD_TEST },
 
+       { 1, TWL4030_BASEADD_TEST },
        { 2, TWL4030_BASEADD_KEYPAD },
        { 2, TWL4030_BASEADD_MADC },
        { 2, TWL4030_BASEADD_INTERRUPTS },
        { 2, TWL4030_BASEADD_LED },
+
        { 2, TWL4030_BASEADD_MAIN_CHARGE },
        { 2, TWL4030_BASEADD_PRECHARGE },
        { 2, TWL4030_BASEADD_PWM0 },
        { 2, TWL4030_BASEADD_PWM1 },
        { 2, TWL4030_BASEADD_PWMA },
+
        { 2, TWL4030_BASEADD_PWMB },
        { 2, TWL5031_BASEADD_ACCESSORY },
        { 2, TWL5031_BASEADD_INTERRUPTS },
-
        { 3, TWL4030_BASEADD_BACKUP },
        { 3, TWL4030_BASEADD_INT },
+
        { 3, TWL4030_BASEADD_PM_MASTER },
        { 3, TWL4030_BASEADD_PM_RECEIVER },
        { 3, TWL4030_BASEADD_RTC },
        { 3, TWL4030_BASEADD_SECURED_REG },
 };
 
+static struct regmap_config twl4030_regmap_config[4] = {
+       {
+               /* Address 0x48 */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+       {
+               /* Address 0x49 */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+       {
+               /* Address 0x4a */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+       {
+               /* Address 0x4b */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+};
+
 static struct twl_mapping twl6030_map[] = {
        /*
         * NOTE:  don't change this table without updating the
@@ -254,14 +274,35 @@ static struct twl_mapping twl6030_map[] = {
        { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
        { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
        { SUB_CHIP_ID2, TWL6030_BASEADD_RSV },
+
        { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER },
        { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC },
-
        { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
        { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
        { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
 };
 
+static struct regmap_config twl6030_regmap_config[3] = {
+       {
+               /* Address 0x48 */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+       {
+               /* Address 0x49 */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+       {
+               /* Address 0x4a */
+               .reg_bits = 8,
+               .val_bits = 8,
+               .max_register = 0xff,
+       },
+};
+
 /*----------------------------------------------------------------------*/
 
 /* Exported Functions */
@@ -283,9 +324,8 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
        int ret;
        int sid;
        struct twl_client *twl;
-       struct i2c_msg *msg;
 
-       if (unlikely(mod_no > TWL_MODULE_LAST)) {
+       if (unlikely(mod_no >= TWL_MODULE_LAST)) {
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
@@ -301,32 +341,14 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
        }
        twl = &twl_modules[sid];
 
-       mutex_lock(&twl->xfer_lock);
-       /*
-        * [MSG1]: fill the register address data
-        * fill the data Tx buffer
-        */
-       msg = &twl->xfer_msg[0];
-       msg->addr = twl->address;
-       msg->len = num_bytes + 1;
-       msg->flags = 0;
-       msg->buf = value;
-       /* over write the first byte of buffer with the register address */
-       *value = twl_map[mod_no].base + reg;
-       ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1);
-       mutex_unlock(&twl->xfer_lock);
-
-       /* i2c_transfer returns number of messages transferred */
-       if (ret != 1) {
-               pr_err("%s: i2c_write failed to transfer all messages\n",
-                       DRIVER_NAME);
-               if (ret < 0)
-                       return ret;
-               else
-                       return -EIO;
-       } else {
-               return 0;
-       }
+       ret = regmap_bulk_write(twl->regmap, twl_map[mod_no].base + reg,
+                               value, num_bytes);
+
+       if (ret)
+               pr_err("%s: Write failed (mod %d, reg 0x%02x count %d)\n",
+                      DRIVER_NAME, mod_no, reg, num_bytes);
+
+       return ret;
 }
 EXPORT_SYMBOL(twl_i2c_write);
 
@@ -342,12 +364,10 @@ EXPORT_SYMBOL(twl_i2c_write);
 int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
 {
        int ret;
-       u8 val;
        int sid;
        struct twl_client *twl;
-       struct i2c_msg *msg;
 
-       if (unlikely(mod_no > TWL_MODULE_LAST)) {
+       if (unlikely(mod_no >= TWL_MODULE_LAST)) {
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
@@ -363,34 +383,14 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
        }
        twl = &twl_modules[sid];
 
-       mutex_lock(&twl->xfer_lock);
-       /* [MSG1] fill the register address data */
-       msg = &twl->xfer_msg[0];
-       msg->addr = twl->address;
-       msg->len = 1;
-       msg->flags = 0; /* Read the register value */
-       val = twl_map[mod_no].base + reg;
-       msg->buf = &val;
-       /* [MSG2] fill the data rx buffer */
-       msg = &twl->xfer_msg[1];
-       msg->addr = twl->address;
-       msg->flags = I2C_M_RD;  /* Read the register value */
-       msg->len = num_bytes;   /* only n bytes */
-       msg->buf = value;
-       ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
-       mutex_unlock(&twl->xfer_lock);
-
-       /* i2c_transfer returns number of messages transferred */
-       if (ret != 2) {
-               pr_err("%s: i2c_read failed to transfer all messages\n",
-                       DRIVER_NAME);
-               if (ret < 0)
-                       return ret;
-               else
-                       return -EIO;
-       } else {
-               return 0;
-       }
+       ret = regmap_bulk_read(twl->regmap, twl_map[mod_no].base + reg,
+                              value, num_bytes);
+
+       if (ret)
+               pr_err("%s: Read failed (mod %d, reg 0x%02x count %d)\n",
+                      DRIVER_NAME, mod_no, reg, num_bytes);
+
+       return ret;
 }
 EXPORT_SYMBOL(twl_i2c_read);
 
@@ -404,12 +404,7 @@ EXPORT_SYMBOL(twl_i2c_read);
  */
 int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
 {
-
-       /* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
-       u8 temp_buffer[2] = { 0 };
-       /* offset 1 contains the data */
-       temp_buffer[1] = value;
-       return twl_i2c_write(mod_no, temp_buffer, reg, 1);
+       return twl_i2c_write(mod_no, &value, reg, 1);
 }
 EXPORT_SYMBOL(twl_i2c_write_u8);
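Note the buffer contract this conversion removes: the old twl_i2c_write() overwrote byte 0 of the caller's buffer with the register address, so multi-byte writers had to reserve a scratch byte. A before/after sketch for a hypothetical two-byte caller:

/* before: byte 0 reserved, payload starts at offset 1 */
u8 buf[3] = { 0, lsb, msb };
twl_i2c_write(mod_no, buf, reg, 2);

/* after: the buffer is payload only, regmap handles addressing */
u8 buf[2] = { lsb, msb };
twl_i2c_write(mod_no, buf, reg, 2);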
 
@@ -646,8 +641,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
                        return PTR_ERR(child);
        }
 
-       if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc) {
-               child = add_child(2, "twl4030_madc",
+       if (IS_ENABLED(CONFIG_TWL4030_MADC) && pdata->madc &&
+           twl_class_is_4030()) {
+               child = add_child(SUB_CHIP_ID2, "twl4030_madc",
                                pdata->madc, sizeof(*pdata->madc),
                                true, irq_base + MADC_INTR_OFFSET, 0);
                if (IS_ERR(child))
@@ -663,15 +659,21 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
                 * HW security concerns, and "least privilege".
                 */
                sub_chip_id = twl_map[TWL_MODULE_RTC].sid;
-               child = add_child(sub_chip_id, "twl_rtc",
-                               NULL, 0,
+               child = add_child(sub_chip_id, "twl_rtc", NULL, 0,
                                true, irq_base + RTC_INTR_OFFSET, 0);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
 
-       if (IS_ENABLED(CONFIG_PWM_TWL6030) && twl_class_is_6030()) {
-               child = add_child(SUB_CHIP_ID1, "twl6030-pwm", NULL, 0,
+       if (IS_ENABLED(CONFIG_PWM_TWL)) {
+               child = add_child(SUB_CHIP_ID1, "twl-pwm", NULL, 0,
+                                 false, 0, 0);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+       }
+
+       if (IS_ENABLED(CONFIG_PWM_TWL_LED)) {
+               child = add_child(SUB_CHIP_ID1, "twl-pwmled", NULL, 0,
                                  false, 0, 0);
                if (IS_ERR(child))
                        return PTR_ERR(child);
@@ -723,9 +725,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
 
                }
 
-               child = add_child(0, "twl4030_usb",
-                               pdata->usb, sizeof(*pdata->usb),
-                               true,
+               child = add_child(SUB_CHIP_ID0, "twl4030_usb",
+                               pdata->usb, sizeof(*pdata->usb), true,
                                /* irq0 = USB_PRES, irq1 = USB */
                                irq_base + USB_PRES_INTR_OFFSET,
                                irq_base + USB_INTR_OFFSET);
@@ -773,9 +774,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
 
                pdata->usb->features = features;
 
-               child = add_child(0, "twl6030_usb",
-                       pdata->usb, sizeof(*pdata->usb),
-                       true,
+               child = add_child(SUB_CHIP_ID0, "twl6030_usb",
+                       pdata->usb, sizeof(*pdata->usb), true,
                        /* irq1 = VBUS_PRES, irq0 = USB ID */
                        irq_base + USBOTG_INTR_OFFSET,
                        irq_base + USB_PRES_INTR_OFFSET);
@@ -799,22 +799,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
        }
 
        if (IS_ENABLED(CONFIG_TWL4030_WATCHDOG) && twl_class_is_4030()) {
-               child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0);
+               child = add_child(SUB_CHIP_ID3, "twl4030_wdt", NULL, 0,
+                                 false, 0, 0);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
 
        if (IS_ENABLED(CONFIG_INPUT_TWL4030_PWRBUTTON) && twl_class_is_4030()) {
-               child = add_child(1, "twl4030_pwrbutton",
-                               NULL, 0, true, irq_base + 8 + 0, 0);
+               child = add_child(SUB_CHIP_ID3, "twl4030_pwrbutton", NULL, 0,
+                                 true, irq_base + 8 + 0, 0);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
 
        if (IS_ENABLED(CONFIG_MFD_TWL4030_AUDIO) && pdata->audio &&
            twl_class_is_4030()) {
-               sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid;
-               child = add_child(sub_chip_id, "twl4030-audio",
+               child = add_child(SUB_CHIP_ID1, "twl4030-audio",
                                pdata->audio, sizeof(*pdata->audio),
                                false, 0, 0);
                if (IS_ERR(child))
@@ -1054,7 +1054,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
 
        if (IS_ENABLED(CONFIG_CHARGER_TWL4030) && pdata->bci &&
                        !(features & (TPS_SUBSET | TWL5031))) {
-               child = add_child(3, "twl4030_bci",
+               child = add_child(SUB_CHIP_ID3, "twl4030_bci",
                                pdata->bci, sizeof(*pdata->bci), false,
                                /* irq0 = CHG_PRES, irq1 = BCI */
                                irq_base + BCI_PRES_INTR_OFFSET,
@@ -1077,8 +1077,8 @@ static inline int __init protect_pm_master(void)
 {
        int e = 0;
 
-       e = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                            TWL4030_PM_MASTER_PROTECT_KEY);
        return e;
 }
 
@@ -1086,12 +1086,10 @@ static inline int __init unprotect_pm_master(void)
 {
        int e = 0;
 
-       e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG1,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
-       e |= twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG2,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                             TWL4030_PM_MASTER_PROTECT_KEY);
+       e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+                             TWL4030_PM_MASTER_PROTECT_KEY);
 
        return e;
 }
@@ -1176,6 +1174,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        struct twl4030_platform_data    *pdata = client->dev.platform_data;
        struct device_node              *node = client->dev.of_node;
        struct platform_device          *pdev;
+       struct regmap_config            *twl_regmap_config;
        int                             irq_base = 0;
        int                             status;
        unsigned                        i, num_slaves;
@@ -1229,22 +1228,23 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        if ((id->driver_data) & TWL6030_CLASS) {
                twl_id = TWL6030_CLASS_ID;
                twl_map = &twl6030_map[0];
+               twl_regmap_config = twl6030_regmap_config;
                num_slaves = TWL_NUM_SLAVES - 1;
        } else {
                twl_id = TWL4030_CLASS_ID;
                twl_map = &twl4030_map[0];
+               twl_regmap_config = twl4030_regmap_config;
                num_slaves = TWL_NUM_SLAVES;
        }
 
        for (i = 0; i < num_slaves; i++) {
                struct twl_client *twl = &twl_modules[i];
 
-               twl->address = client->addr + i;
                if (i == 0) {
                        twl->client = client;
                } else {
                        twl->client = i2c_new_dummy(client->adapter,
-                                       twl->address);
+                                                   client->addr + i);
                        if (!twl->client) {
                                dev_err(&client->dev,
                                        "can't attach client %d\n", i);
@@ -1252,7 +1252,16 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
                                goto fail;
                        }
                }
-               mutex_init(&twl->xfer_lock);
+
+               twl->regmap = devm_regmap_init_i2c(twl->client,
+                                                  &twl_regmap_config[i]);
+               if (IS_ERR(twl->regmap)) {
+                       status = PTR_ERR(twl->regmap);
+                       dev_err(&client->dev,
+                               "Failed to allocate regmap %d, err: %d\n", i,
+                               status);
+                       goto fail;
+               }
        }
 
        inuse = true;
index cdd1173..a5f9888 100644 (file)
@@ -295,8 +295,8 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid)
        irqreturn_t     ret;
        u8              pih_isr;
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_PIH, &pih_isr,
-                       REG_PIH_ISR_P1);
+       ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr,
+                             REG_PIH_ISR_P1);
        if (ret) {
                pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
                return IRQ_NONE;
@@ -501,7 +501,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
                } imr;
 
                /* byte[0] gets overwritten as we write ... */
-               imr.word = cpu_to_le32(agent->imr << 8);
+               imr.word = cpu_to_le32(agent->imr);
                agent->imr_change_pending = false;
 
                /* write the whole mask ... simpler than subsetting it */
@@ -526,7 +526,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
                 * any processor on the other IRQ line, EDR registers are
                 * shared.
                 */
-               status = twl_i2c_read(sih->module, bytes + 1,
+               status = twl_i2c_read(sih->module, bytes,
                                sih->edr_offset, sih->bytes_edr);
                if (status) {
                        pr_err("twl4030: %s, %s --> %d\n", __func__,
@@ -538,7 +538,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
                while (edge_change) {
                        int             i = fls(edge_change) - 1;
                        struct irq_data *idata;
-                       int             byte = 1 + (i >> 2);
+                       int             byte = i >> 2;
                        int             off = (i & 0x3) * 2;
                        unsigned int    type;
 
index a39dcf3..88ff9dc 100644 (file)
@@ -173,7 +173,7 @@ static int twl4030battery_temperature(int raw_volt)
 
        volt = (raw_volt * TEMP_STEP_SIZE) / TEMP_PSR_R;
        /* Getting and calculating the supply current in microamperes */
-       ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+       ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
                REG_BCICTL2);
        if (ret < 0)
                return ret;
@@ -196,7 +196,7 @@ static int twl4030battery_current(int raw_volt)
        int ret;
        u8 val;
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE, &val,
+       ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val,
                TWL4030_BCI_BCICTL1);
        if (ret)
                return ret;
@@ -635,7 +635,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
        int ret;
        u8 regval;
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+       ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
                              &regval, TWL4030_BCI_BCICTL1);
        if (ret) {
                dev_err(madc->dev, "unable to read BCICTL1 reg 0x%X",
@@ -646,7 +646,7 @@ static int twl4030_madc_set_current_generator(struct twl4030_madc_data *madc,
                regval |= chan ? TWL4030_BCI_ITHEN : TWL4030_BCI_TYPEN;
        else
                regval &= chan ? ~TWL4030_BCI_ITHEN : ~TWL4030_BCI_TYPEN;
-       ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+       ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
                               regval, TWL4030_BCI_BCICTL1);
        if (ret) {
                dev_err(madc->dev, "unable to write BCICTL1 reg 0x%X\n",
@@ -668,7 +668,7 @@ static int twl4030_madc_set_power(struct twl4030_madc_data *madc, int on)
        u8 regval;
        int ret;
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+       ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
                              &regval, TWL4030_MADC_CTRL1);
        if (ret) {
                dev_err(madc->dev, "unable to read madc ctrl1 reg 0x%X\n",
@@ -725,7 +725,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_current_generator;
 
-       ret = twl_i2c_read_u8(TWL4030_MODULE_MAIN_CHARGE,
+       ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE,
                              &regval, TWL4030_BCI_BCICTL1);
        if (ret) {
                dev_err(&pdev->dev, "unable to read reg BCI CTL1 0x%X\n",
@@ -733,7 +733,7 @@ static int twl4030_madc_probe(struct platform_device *pdev)
                goto err_i2c;
        }
        regval |= TWL4030_BCI_MESBAT;
-       ret = twl_i2c_write_u8(TWL4030_MODULE_MAIN_CHARGE,
+       ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
                               regval, TWL4030_BCI_BCICTL1);
        if (ret) {
                dev_err(&pdev->dev, "unable to write reg BCI Ctl1 0x%X\n",
index a533206..4dae241 100644 (file)
@@ -128,12 +128,10 @@ static int twl4030_write_script_byte(u8 address, u8 byte)
 {
        int err;
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
-                               R_MEMORY_ADDRESS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_MEMORY_ADDRESS);
        if (err)
                goto out;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, byte,
-                               R_MEMORY_DATA);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, byte, R_MEMORY_DATA);
 out:
        return err;
 }
@@ -189,19 +187,16 @@ static int twl4030_config_wakeup3_sequence(u8 address)
        u8 data;
 
        /* Set SLEEP to ACTIVE SEQ address for P3 */
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
-                               R_SEQ_ADD_S2A3);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A3);
        if (err)
                goto out;
 
        /* P3 LVL_WAKEUP should be on LEVEL */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
-                               R_P3_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P3_SW_EVENTS);
        if (err)
                goto out;
        data |= LVL_WAKEUP;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
-                               R_P3_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P3_SW_EVENTS);
 out:
        if (err)
                pr_err("TWL4030 wakeup sequence for P3 config error\n");
@@ -214,43 +209,38 @@ static int twl4030_config_wakeup12_sequence(u8 address)
        u8 data;
 
        /* Set SLEEP to ACTIVE SEQ address for P1 and P2 */
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
-                               R_SEQ_ADD_S2A12);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_S2A12);
        if (err)
                goto out;
 
        /* P1/P2 LVL_WAKEUP should be on LEVEL */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
-                               R_P1_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P1_SW_EVENTS);
        if (err)
                goto out;
 
        data |= LVL_WAKEUP;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
-                               R_P1_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P1_SW_EVENTS);
        if (err)
                goto out;
 
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
-                               R_P2_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data, R_P2_SW_EVENTS);
        if (err)
                goto out;
 
        data |= LVL_WAKEUP;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data,
-                               R_P2_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data, R_P2_SW_EVENTS);
        if (err)
                goto out;
 
        if (machine_is_omap_3430sdp() || machine_is_omap_ldp()) {
                /* Disabling AC charger effect on sleep-active transitions */
-               err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &data,
-                                       R_CFG_P1_TRANSITION);
+               err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &data,
+                                     R_CFG_P1_TRANSITION);
                if (err)
                        goto out;
                data &= ~(1<<1);
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, data ,
-                                       R_CFG_P1_TRANSITION);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, data,
+                                      R_CFG_P1_TRANSITION);
                if (err)
                        goto out;
        }
@@ -267,8 +257,7 @@ static int twl4030_config_sleep_sequence(u8 address)
        int err;
 
        /* Set ACTIVE to SLEEP SEQ address in T2 memory*/
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
-                               R_SEQ_ADD_A2S);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_A2S);
 
        if (err)
                pr_err("TWL4030 sleep sequence config error\n");
@@ -282,42 +271,35 @@ static int twl4030_config_warmreset_sequence(u8 address)
        u8 rd_data;
 
        /* Set WARM RESET SEQ address for P1 */
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, address,
-                               R_SEQ_ADD_WARM);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, address, R_SEQ_ADD_WARM);
        if (err)
                goto out;
 
        /* P1/P2/P3 enable WARMRESET */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
-                               R_P1_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P1_SW_EVENTS);
        if (err)
                goto out;
 
        rd_data |= ENABLE_WARMRESET;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
-                               R_P1_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P1_SW_EVENTS);
        if (err)
                goto out;
 
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
-                               R_P2_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P2_SW_EVENTS);
        if (err)
                goto out;
 
        rd_data |= ENABLE_WARMRESET;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
-                               R_P2_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P2_SW_EVENTS);
        if (err)
                goto out;
 
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &rd_data,
-                               R_P3_SW_EVENTS);
+       err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &rd_data, R_P3_SW_EVENTS);
        if (err)
                goto out;
 
        rd_data |= ENABLE_WARMRESET;
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, rd_data,
-                               R_P3_SW_EVENTS);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, rd_data, R_P3_SW_EVENTS);
 out:
        if (err)
                pr_err("TWL4030 warmreset seq config error\n");
@@ -341,7 +323,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
        rconfig_addr = res_config_addrs[rconfig->resource];
 
        /* Set resource group */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &grp,
+       err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &grp,
                              rconfig_addr + DEV_GRP_OFFSET);
        if (err) {
                pr_err("TWL4030 Resource %d group could not be read\n",
@@ -352,7 +334,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
        if (rconfig->devgroup != TWL4030_RESCONFIG_UNDEF) {
                grp &= ~DEV_GRP_MASK;
                grp |= rconfig->devgroup << DEV_GRP_SHIFT;
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+               err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
                                       grp, rconfig_addr + DEV_GRP_OFFSET);
                if (err < 0) {
                        pr_err("TWL4030 failed to program devgroup\n");
@@ -361,7 +343,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
        }
 
        /* Set resource types */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &type,
+       err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &type,
                                rconfig_addr + TYPE_OFFSET);
        if (err < 0) {
                pr_err("TWL4030 Resource %d type could not be read\n",
@@ -379,7 +361,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
                type |= rconfig->type2 << TYPE2_SHIFT;
        }
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+       err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
                                type, rconfig_addr + TYPE_OFFSET);
        if (err < 0) {
                pr_err("TWL4030 failed to program resource type\n");
@@ -387,7 +369,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
        }
 
        /* Set remap states */
-       err = twl_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER, &remap,
+       err = twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &remap,
                              rconfig_addr + REMAP_OFFSET);
        if (err < 0) {
                pr_err("TWL4030 Resource %d remap could not be read\n",
@@ -405,7 +387,7 @@ static int twl4030_configure_resource(struct twl4030_resconfig *rconfig)
                remap |= rconfig->remap_sleep << SLEEP_STATE_SHIFT;
        }
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
+       err = twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER,
                               remap,
                               rconfig_addr + REMAP_OFFSET);
        if (err < 0) {
@@ -463,49 +445,47 @@ int twl4030_remove_script(u8 flags)
 {
        int err = 0;
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG1,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err) {
                pr_err("twl4030: unable to unlock PROTECT_KEY\n");
                return err;
        }
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG2,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err) {
                pr_err("twl4030: unable to unlock PROTECT_KEY\n");
                return err;
        }
 
        if (flags & TWL4030_WRST_SCRIPT) {
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
-                               R_SEQ_ADD_WARM);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+                                      R_SEQ_ADD_WARM);
                if (err)
                        return err;
        }
        if (flags & TWL4030_WAKEUP12_SCRIPT) {
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
-                               R_SEQ_ADD_S2A12);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+                                      R_SEQ_ADD_S2A12);
                if (err)
                        return err;
        }
        if (flags & TWL4030_WAKEUP3_SCRIPT) {
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
-                               R_SEQ_ADD_S2A3);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+                                      R_SEQ_ADD_S2A3);
                if (err)
                        return err;
        }
        if (flags & TWL4030_SLEEP_SCRIPT) {
-               err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
-                               R_SEQ_ADD_A2S);
+               err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, END_OF_SCRIPT,
+                                      R_SEQ_ADD_A2S);
                if (err)
                        return err;
        }
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                pr_err("TWL4030 Unable to relock registers\n");
 
@@ -521,7 +501,7 @@ void twl4030_power_off(void)
 {
        int err;
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
                               TWL4030_PM_MASTER_P1_SW_EVENTS);
        if (err)
                pr_err("TWL4030 Unable to power off\n");
@@ -534,15 +514,13 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
        struct twl4030_resconfig *resconfig;
        u8 val, address = twl4030_start_script_address;
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG1,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                goto unlock;
 
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
-                       TWL4030_PM_MASTER_KEY_CFG2,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                goto unlock;
 
@@ -567,14 +545,14 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
        /* Board has to be wired properly to use this feature */
        if (twl4030_scripts->use_poweroff && !pm_power_off) {
                /* Default for SEQ_OFFSYNC is set, lets ensure this */
-               err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+               err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
                                      TWL4030_PM_MASTER_CFG_P123_TRANSITION);
                if (err) {
                        pr_warning("TWL4030 Unable to read registers\n");
 
                } else if (!(val & SEQ_OFFSYNC)) {
                        val |= SEQ_OFFSYNC;
-                       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+                       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
                                        TWL4030_PM_MASTER_CFG_P123_TRANSITION);
                        if (err) {
                                pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
@@ -586,8 +564,8 @@ void twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
        }
 
 relock:
-       err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
-                       TWL4030_PM_MASTER_PROTECT_KEY);
+       err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+                              TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                pr_err("TWL4030 Unable to relock registers\n");
        return;
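
The twl4030-power.c hunks are a mechanical migration from the old
TWL4030_MODULE_PM_* identifiers to the generic TWL_MODULE_PM_* ones; the
protect-key discipline around every PM_MASTER write is unchanged. As a rough
sketch of that discipline, assuming the twl core's twl_i2c_write_u8() and the
key constants visible in the hunks above (the helper name is hypothetical):

#include <linux/i2c/twl.h>

/* Hypothetical helper: unlock PROTECT_KEY, program one register, relock. */
static int twl4030_write_protected_sketch(u8 value, u8 reg)
{
        int err;

        /* Unlock PROTECT_KEY with the two configuration keys */
        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
                               TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                return err;

        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
                               TWL4030_PM_MASTER_PROTECT_KEY);
        if (err)
                return err;

        /* Program the now-writable register */
        err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, value, reg);

        /* Relock by writing 0 to PROTECT_KEY, even if the write failed */
        twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
                         TWL4030_PM_MASTER_PROTECT_KEY);

        return err;
}
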
index b76902f..277a8db 100644
@@ -355,7 +355,7 @@ int twl6030_init_irq(struct device *dev, int irq_num)
        static struct irq_chip  twl6030_irq_chip;
        int                     status = 0;
        int                     i;
-       u8                      mask[4];
+       u8                      mask[3];
 
        nr_irqs = TWL6030_NR_IRQS;
 
@@ -370,9 +370,9 @@ int twl6030_init_irq(struct device *dev, int irq_num)
 
        irq_end = irq_base + nr_irqs;
 
+       mask[0] = 0xFF;
        mask[1] = 0xFF;
        mask[2] = 0xFF;
-       mask[3] = 0xFF;
 
        /* mask all int lines */
        twl_i2c_write(TWL_MODULE_PIH, &mask[0], REG_INT_MSK_LINE_A, 3);
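
The mask[] change above fixes an off-by-one: the old code declared four bytes
but initialized only elements 1..3, so the three-byte burst starting at
&mask[0] pushed one uninitialized stack byte to the chip. Shrunk to three
fully initialized bytes, the buffer exactly matches the write length. A
minimal sketch of the corrected pattern, assuming twl_i2c_write()'s
(module, buffer, register, length) signature and the register name used in
the hunk:

#include <linux/i2c/twl.h>

/* Hypothetical helper: mask all three PIH interrupt lines in one burst. */
static int twl6030_mask_all_sketch(void)
{
        u8 mask[3] = { 0xFF, 0xFF, 0xFF };      /* one byte per MSK_LINE register */

        return twl_i2c_write(TWL_MODULE_PIH, mask, REG_INT_MSK_LINE_A,
                             sizeof(mask));
}
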
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
deleted file mode 100644
index 4b42543..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Interrupt controller support for TWL6040
- *
- * Author:     Misael Lopez Cruz <misael.lopez@ti.com>
- *
- * Copyright:   (C) 2011 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/twl6040.h>
-
-struct twl6040_irq_data {
-       int mask;
-       int status;
-};
-
-static struct twl6040_irq_data twl6040_irqs[] = {
-       {
-               .mask = TWL6040_THMSK,
-               .status = TWL6040_THINT,
-       },
-       {
-               .mask = TWL6040_PLUGMSK,
-               .status = TWL6040_PLUGINT | TWL6040_UNPLUGINT,
-       },
-       {
-               .mask = TWL6040_HOOKMSK,
-               .status = TWL6040_HOOKINT,
-       },
-       {
-               .mask = TWL6040_HFMSK,
-               .status = TWL6040_HFINT,
-       },
-       {
-               .mask = TWL6040_VIBMSK,
-               .status = TWL6040_VIBINT,
-       },
-       {
-               .mask = TWL6040_READYMSK,
-               .status = TWL6040_READYINT,
-       },
-};
-
-static inline
-struct twl6040_irq_data *irq_to_twl6040_irq(struct twl6040 *twl6040,
-                                           int irq)
-{
-       return &twl6040_irqs[irq - twl6040->irq_base];
-}
-
-static void twl6040_irq_lock(struct irq_data *data)
-{
-       struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
-       mutex_lock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_sync_unlock(struct irq_data *data)
-{
-       struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-
-       /* write back to hardware any change in irq mask */
-       if (twl6040->irq_masks_cur != twl6040->irq_masks_cache) {
-               twl6040->irq_masks_cache = twl6040->irq_masks_cur;
-               twl6040_reg_write(twl6040, TWL6040_REG_INTMR,
-                                 twl6040->irq_masks_cur);
-       }
-
-       mutex_unlock(&twl6040->irq_mutex);
-}
-
-static void twl6040_irq_enable(struct irq_data *data)
-{
-       struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-       struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
-                                                              data->irq);
-
-       twl6040->irq_masks_cur &= ~irq_data->mask;
-}
-
-static void twl6040_irq_disable(struct irq_data *data)
-{
-       struct twl6040 *twl6040 = irq_data_get_irq_chip_data(data);
-       struct twl6040_irq_data *irq_data = irq_to_twl6040_irq(twl6040,
-                                                              data->irq);
-
-       twl6040->irq_masks_cur |= irq_data->mask;
-}
-
-static struct irq_chip twl6040_irq_chip = {
-       .name                   = "twl6040",
-       .irq_bus_lock           = twl6040_irq_lock,
-       .irq_bus_sync_unlock    = twl6040_irq_sync_unlock,
-       .irq_enable             = twl6040_irq_enable,
-       .irq_disable            = twl6040_irq_disable,
-};
-
-static irqreturn_t twl6040_irq_thread(int irq, void *data)
-{
-       struct twl6040 *twl6040 = data;
-       u8 intid;
-       int i;
-
-       intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
-       /* apply masking and report (backwards to handle READYINT first) */
-       for (i = ARRAY_SIZE(twl6040_irqs) - 1; i >= 0; i--) {
-               if (twl6040->irq_masks_cur & twl6040_irqs[i].mask)
-                       intid &= ~twl6040_irqs[i].status;
-               if (intid & twl6040_irqs[i].status)
-                       handle_nested_irq(twl6040->irq_base + i);
-       }
-
-       /* ack unmasked irqs */
-       twl6040_reg_write(twl6040, TWL6040_REG_INTID, intid);
-
-       return IRQ_HANDLED;
-}
-
-int twl6040_irq_init(struct twl6040 *twl6040)
-{
-       struct device_node *node = twl6040->dev->of_node;
-       int i, nr_irqs, irq_base, ret;
-       u8 val;
-
-       mutex_init(&twl6040->irq_mutex);
-
-       /* mask the individual interrupt sources */
-       twl6040->irq_masks_cur = TWL6040_ALLINT_MSK;
-       twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
-       twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
-
-       nr_irqs = ARRAY_SIZE(twl6040_irqs);
-
-       irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
-       if (IS_ERR_VALUE(irq_base)) {
-               dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
-               return irq_base;
-       }
-       twl6040->irq_base = irq_base;
-
-       irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
-                             &irq_domain_simple_ops, NULL);
-
-       /* Register them with genirq */
-       for (i = irq_base; i < irq_base + nr_irqs; i++) {
-               irq_set_chip_data(i, twl6040);
-               irq_set_chip_and_handler(i, &twl6040_irq_chip,
-                                        handle_level_irq);
-               irq_set_nested_thread(i, 1);
-
-               /* ARM needs us to explicitly flag the IRQ as valid
-                * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-               set_irq_flags(i, IRQF_VALID);
-#else
-               irq_set_noprobe(i);
-#endif
-       }
-
-       ret = request_threaded_irq(twl6040->irq, NULL, twl6040_irq_thread,
-                                  IRQF_ONESHOT, "twl6040", twl6040);
-       if (ret) {
-               dev_err(twl6040->dev, "failed to request IRQ %d: %d\n",
-                       twl6040->irq, ret);
-               return ret;
-       }
-
-       /* reset interrupts */
-       val = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
-
-       /* interrupts cleared on write */
-       twl6040_clear_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_INTCLRMODE);
-
-       return 0;
-}
-EXPORT_SYMBOL(twl6040_irq_init);
-
-void twl6040_irq_exit(struct twl6040 *twl6040)
-{
-       free_irq(twl6040->irq, twl6040);
-}
-EXPORT_SYMBOL(twl6040_irq_exit);
similarity index 84%
rename from drivers/mfd/twl6040-core.c
rename to drivers/mfd/twl6040.c
index 3f2a1cf..583be76 100644
@@ -37,7 +37,6 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/regmap.h>
-#include <linux/err.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/twl6040.h>
 #include <linux/regulator/consumer.h>
@@ -104,7 +103,7 @@ int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
 EXPORT_SYMBOL(twl6040_clear_bits);
 
 /* twl6040 codec manual power-up sequence */
-static int twl6040_power_up(struct twl6040 *twl6040)
+static int twl6040_power_up_manual(struct twl6040 *twl6040)
 {
        u8 ldoctl, ncpctl, lppllctl;
        int ret;
@@ -158,11 +157,12 @@ ncp_err:
        ldoctl &= ~(TWL6040_HSLDOENA | TWL6040_REFENA | TWL6040_OSCENA);
        twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
 
+       dev_err(twl6040->dev, "manual power-up failed\n");
        return ret;
 }
 
 /* twl6040 manual power-down sequence */
-static void twl6040_power_down(struct twl6040 *twl6040)
+static void twl6040_power_down_manual(struct twl6040 *twl6040)
 {
        u8 ncpctl, ldoctl, lppllctl;
 
@@ -192,45 +192,48 @@ static void twl6040_power_down(struct twl6040 *twl6040)
        twl6040_reg_write(twl6040, TWL6040_REG_LDOCTL, ldoctl);
 }
 
-static irqreturn_t twl6040_naudint_handler(int irq, void *data)
+static irqreturn_t twl6040_readyint_handler(int irq, void *data)
 {
        struct twl6040 *twl6040 = data;
-       u8 intid, status;
 
-       intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
+       complete(&twl6040->ready);
 
-       if (intid & TWL6040_READYINT)
-               complete(&twl6040->ready);
+       return IRQ_HANDLED;
+}
 
-       if (intid & TWL6040_THINT) {
-               status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
-               if (status & TWL6040_TSHUTDET) {
-                       dev_warn(twl6040->dev,
-                                "Thermal shutdown, powering-off");
-                       twl6040_power(twl6040, 0);
-               } else {
-                       dev_warn(twl6040->dev,
-                                "Leaving thermal shutdown, powering-on");
-                       twl6040_power(twl6040, 1);
-               }
+static irqreturn_t twl6040_thint_handler(int irq, void *data)
+{
+       struct twl6040 *twl6040 = data;
+       u8 status;
+
+       status = twl6040_reg_read(twl6040, TWL6040_REG_STATUS);
+       if (status & TWL6040_TSHUTDET) {
+               dev_warn(twl6040->dev, "Thermal shutdown, powering-off");
+               twl6040_power(twl6040, 0);
+       } else {
+               dev_warn(twl6040->dev, "Leaving thermal shutdown, powering-on");
+               twl6040_power(twl6040, 1);
        }
 
        return IRQ_HANDLED;
 }
 
-static int twl6040_power_up_completion(struct twl6040 *twl6040,
-                                      int naudint)
+static int twl6040_power_up_automatic(struct twl6040 *twl6040)
 {
        int time_left;
-       u8 intid;
+
+       gpio_set_value(twl6040->audpwron, 1);
 
        time_left = wait_for_completion_timeout(&twl6040->ready,
                                                msecs_to_jiffies(144));
        if (!time_left) {
+               u8 intid;
+
+               dev_warn(twl6040->dev, "timeout waiting for READYINT\n");
                intid = twl6040_reg_read(twl6040, TWL6040_REG_INTID);
                if (!(intid & TWL6040_READYINT)) {
-                       dev_err(twl6040->dev,
-                               "timeout waiting for READYINT\n");
+                       dev_err(twl6040->dev, "automatic power-up failed\n");
+                       gpio_set_value(twl6040->audpwron, 0);
                        return -ETIMEDOUT;
                }
        }
@@ -240,8 +243,6 @@ static int twl6040_power_up_completion(struct twl6040 *twl6040,
 
 int twl6040_power(struct twl6040 *twl6040, int on)
 {
-       int audpwron = twl6040->audpwron;
-       int naudint = twl6040->irq;
        int ret = 0;
 
        mutex_lock(&twl6040->mutex);
@@ -251,23 +252,17 @@ int twl6040_power(struct twl6040 *twl6040, int on)
                if (twl6040->power_count++)
                        goto out;
 
-               if (gpio_is_valid(audpwron)) {
-                       /* use AUDPWRON line */
-                       gpio_set_value(audpwron, 1);
-                       /* wait for power-up completion */
-                       ret = twl6040_power_up_completion(twl6040, naudint);
+               if (gpio_is_valid(twl6040->audpwron)) {
+                       /* use automatic power-up sequence */
+                       ret = twl6040_power_up_automatic(twl6040);
                        if (ret) {
-                               dev_err(twl6040->dev,
-                                       "automatic power-down failed\n");
                                twl6040->power_count = 0;
                                goto out;
                        }
                } else {
                        /* use manual power-up sequence */
-                       ret = twl6040_power_up(twl6040);
+                       ret = twl6040_power_up_manual(twl6040);
                        if (ret) {
-                               dev_err(twl6040->dev,
-                                       "manual power-up failed\n");
                                twl6040->power_count = 0;
                                goto out;
                        }
@@ -288,15 +283,15 @@ int twl6040_power(struct twl6040 *twl6040, int on)
                if (--twl6040->power_count)
                        goto out;
 
-               if (gpio_is_valid(audpwron)) {
+               if (gpio_is_valid(twl6040->audpwron)) {
                        /* use AUDPWRON line */
-                       gpio_set_value(audpwron, 0);
+                       gpio_set_value(twl6040->audpwron, 0);
 
                        /* power-down sequence latency */
                        usleep_range(500, 700);
                } else {
                        /* use manual power-down sequence */
-                       twl6040_power_down(twl6040);
+                       twl6040_power_down_manual(twl6040);
                }
                twl6040->sysclk = 0;
                twl6040->mclk = 0;
@@ -503,6 +498,25 @@ static struct regmap_config twl6040_regmap_config = {
        .readable_reg = twl6040_readable_reg,
 };
 
+static const struct regmap_irq twl6040_irqs[] = {
+       { .reg_offset = 0, .mask = TWL6040_THINT, },
+       { .reg_offset = 0, .mask = TWL6040_PLUGINT | TWL6040_UNPLUGINT, },
+       { .reg_offset = 0, .mask = TWL6040_HOOKINT, },
+       { .reg_offset = 0, .mask = TWL6040_HFINT, },
+       { .reg_offset = 0, .mask = TWL6040_VIBINT, },
+       { .reg_offset = 0, .mask = TWL6040_READYINT, },
+};
+
+static struct regmap_irq_chip twl6040_irq_chip = {
+       .name = "twl6040",
+       .irqs = twl6040_irqs,
+       .num_irqs = ARRAY_SIZE(twl6040_irqs),
+
+       .num_regs = 1,
+       .status_base = TWL6040_REG_INTID,
+       .mask_base = TWL6040_REG_INTMR,
+};
+
 static int __devinit twl6040_probe(struct i2c_client *client,
                                     const struct i2c_device_id *id)
 {
@@ -578,18 +592,31 @@ static int __devinit twl6040_probe(struct i2c_client *client,
                        goto gpio_err;
        }
 
-       /* codec interrupt */
-       ret = twl6040_irq_init(twl6040);
-       if (ret)
+       ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq,
+                       IRQF_ONESHOT, 0, &twl6040_irq_chip,
+                       &twl6040->irq_data);
+       if (ret < 0)
                goto irq_init_err;
 
-       ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
-                                  NULL, twl6040_naudint_handler, IRQF_ONESHOT,
+       twl6040->irq_ready = regmap_irq_get_virq(twl6040->irq_data,
+                                              TWL6040_IRQ_READY);
+       twl6040->irq_th = regmap_irq_get_virq(twl6040->irq_data,
+                                              TWL6040_IRQ_TH);
+
+       ret = request_threaded_irq(twl6040->irq_ready, NULL,
+                                  twl6040_readyint_handler, IRQF_ONESHOT,
                                   "twl6040_irq_ready", twl6040);
        if (ret) {
-               dev_err(twl6040->dev, "READY IRQ request failed: %d\n",
-                       ret);
-               goto irq_err;
+               dev_err(twl6040->dev, "READY IRQ request failed: %d\n", ret);
+               goto readyirq_err;
+       }
+
+       ret = request_threaded_irq(twl6040->irq_th, NULL,
+                                  twl6040_thint_handler, IRQF_ONESHOT,
+                                  "twl6040_irq_th", twl6040);
+       if (ret) {
+               dev_err(twl6040->dev, "Thermal IRQ request failed: %d\n", ret);
+               goto thirq_err;
        }
 
        /* dual-access registers controlled by I2C only */
@@ -601,7 +628,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
         * The ASoC codec can work without pdata, pass the platform_data only if
         * it has been provided.
         */
-       irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+       irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_PLUG);
        cell = &twl6040->cells[children];
        cell->name = "twl6040-codec";
        twl6040_codec_rsrc[0].start = irq;
@@ -615,7 +642,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
        children++;
 
        if (twl6040_has_vibra(pdata, node)) {
-               irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+               irq = regmap_irq_get_virq(twl6040->irq_data, TWL6040_IRQ_VIB);
 
                cell = &twl6040->cells[children];
                cell->name = "twl6040-vibra";
@@ -654,9 +681,11 @@ static int __devinit twl6040_probe(struct i2c_client *client,
        return 0;
 
 mfd_err:
-       free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
-irq_err:
-       twl6040_irq_exit(twl6040);
+       free_irq(twl6040->irq_th, twl6040);
+thirq_err:
+       free_irq(twl6040->irq_ready, twl6040);
+readyirq_err:
+       regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
 irq_init_err:
        if (gpio_is_valid(twl6040->audpwron))
                gpio_free(twl6040->audpwron);
@@ -680,8 +709,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
        if (gpio_is_valid(twl6040->audpwron))
                gpio_free(twl6040->audpwron);
 
-       free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
-       twl6040_irq_exit(twl6040);
+       free_irq(twl6040->irq_ready, twl6040);
+       free_irq(twl6040->irq_th, twl6040);
+       regmap_del_irq_chip(twl6040->irq, twl6040->irq_data);
 
        mfd_remove_devices(&client->dev);
        i2c_set_clientdata(client, NULL);
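
Taken together, deleting twl6040-irq.c and reworking the renamed twl6040.c
replace about 200 lines of hand-rolled irq_chip plumbing with the generic
regmap-irq helper: the driver now only declares a table of status bits and
lets regmap handle masking, acking and nested-thread dispatch. A rough sketch
of that pattern under the 3.8-era regmap API; the chip name, register
addresses and bit layout below are hypothetical (the real table is the
twl6040_irq_chip added above):

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq demo_irqs[] = {
        { .reg_offset = 0, .mask = BIT(0), },   /* chip-local hwirq 0 */
        { .reg_offset = 0, .mask = BIT(1), },   /* chip-local hwirq 1 */
};

static struct regmap_irq_chip demo_irq_chip = {
        .name = "demo",
        .irqs = demo_irqs,
        .num_irqs = ARRAY_SIZE(demo_irqs),
        .num_regs = 1,                  /* one status/mask register pair */
        .status_base = 0x01,            /* hypothetical register addresses */
        .mask_base = 0x02,
};

/* Map the whole chip, then translate one chip-local hwirq into the Linux
 * virq that request_threaded_irq() expects; returns the virq or -errno.
 */
static int demo_irq_setup(struct regmap *map, int irq,
                          struct regmap_irq_chip_data **data)
{
        int ret;

        ret = regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0,
                                  &demo_irq_chip, data);
        if (ret < 0)
                return ret;

        return regmap_irq_get_virq(*data, 0);
}
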
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
new file mode 100644
index 0000000..af2a670
--- /dev/null
@@ -0,0 +1,137 @@
+/*
+ *  Nano River Technologies viperboard driver
+ *
+ *  This is the core driver for the viperboard. There are cell drivers
+ *  available for I2C, ADC and both GPIOs. SPI is not yet supported.
+ *  The drivers do not support all features the board exposes. See user
+ *  manual of the viperboard.
+ *
+ *  (C) 2012 by Lemonage GmbH
+ *  Author: Lars Poeschel <poeschel@lemonage.de>
+ *  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/viperboard.h>
+
+#include <linux/usb.h>
+
+
+static const struct usb_device_id vprbrd_table[] = {
+       { USB_DEVICE(0x2058, 0x1005) },   /* Nano River Technologies */
+       { }                               /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, vprbrd_table);
+
+static struct mfd_cell vprbrd_devs[] = {
+       {
+               .name = "viperboard-gpio",
+       },
+       {
+               .name = "viperboard-i2c",
+       },
+       {
+               .name = "viperboard-adc",
+       },
+};
+
+static int vprbrd_probe(struct usb_interface *interface,
+                             const struct usb_device_id *id)
+{
+       struct vprbrd *vb;
+
+       u16 version = 0;
+       int pipe, ret;
+
+       /* allocate memory for our device state and initialize it */
+       vb = kzalloc(sizeof(*vb), GFP_KERNEL);
+       if (vb == NULL) {
+               dev_err(&interface->dev, "Out of memory\n");
+               return -ENOMEM;
+       }
+
+       mutex_init(&vb->lock);
+
+       vb->usb_dev = usb_get_dev(interface_to_usbdev(interface));
+
+       /* save our data pointer in this interface device */
+       usb_set_intfdata(interface, vb);
+       dev_set_drvdata(&vb->pdev.dev, vb);
+
+       /* get version information, major first, minor then */
+       pipe = usb_rcvctrlpipe(vb->usb_dev, 0);
+       ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MAJOR,
+               VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+               VPRBRD_USB_TIMEOUT_MS);
+       if (ret == 1)
+               version = vb->buf[0];
+
+       ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MINOR,
+               VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
+               VPRBRD_USB_TIMEOUT_MS);
+       if (ret == 1) {
+               version <<= 8;
+               version = version | vb->buf[0];
+       }
+
+       dev_info(&interface->dev,
+                "version %x.%02x found at bus %03d address %03d\n",
+                version >> 8, version & 0xff,
+                vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
+
+       ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
+                               ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+       if (ret != 0) {
+               dev_err(&interface->dev, "Failed to add mfd devices to core.");
+               goto error;
+       }
+
+       return 0;
+
+error:
+       if (vb) {
+               usb_put_dev(vb->usb_dev);
+               kfree(vb);
+       }
+
+       return ret;
+}
+
+static void vprbrd_disconnect(struct usb_interface *interface)
+{
+       struct vprbrd *vb = usb_get_intfdata(interface);
+
+       mfd_remove_devices(&interface->dev);
+       usb_set_intfdata(interface, NULL);
+       usb_put_dev(vb->usb_dev);
+       kfree(vb);
+
+       dev_dbg(&interface->dev, "disconnected\n");
+}
+
+static struct usb_driver vprbrd_driver = {
+       .name           = "viperboard",
+       .probe          = vprbrd_probe,
+       .disconnect     = vprbrd_disconnect,
+       .id_table       = vprbrd_table,
+};
+
+module_usb_driver(vprbrd_driver);
+
+MODULE_DESCRIPTION("Nano River Technologies viperboard mfd core driver");
+MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>");
+MODULE_LICENSE("GPL");
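
viperboard.c is a compact example of an MFD core that lives on a USB device
rather than an I2C or SPI bus: probe reads the firmware version with two
one-byte control transfers, then registers the GPIO/I2C/ADC cells against the
USB interface via mfd_add_devices(). A reduced sketch of the version read,
reusing the VPRBRD_* constants and struct vprbrd fields from the file above
(the helper name is hypothetical):

#include <linux/usb.h>
#include <linux/mfd/viperboard.h>

/* Each control transfer moves one byte into vb->buf; a byte is consumed
 * only when usb_control_msg() reports exactly one byte transferred.
 */
static u16 vprbrd_read_version_sketch(struct vprbrd *vb)
{
        int pipe = usb_rcvctrlpipe(vb->usb_dev, 0);
        u16 version = 0;

        if (usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MAJOR,
                            VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
                            VPRBRD_USB_TIMEOUT_MS) == 1)
                version = vb->buf[0];

        if (usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_MINOR,
                            VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, vb->buf, 1,
                            VPRBRD_USB_TIMEOUT_MS) == 1)
                version = (version << 8) | vb->buf[0];  /* major in high byte */

        return version;
}
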
index 3141c4a..088872a 100644
@@ -56,6 +56,18 @@ static const struct reg_default wm5102_reva_patch[] = {
        { 0x80, 0x0000 },
 };
 
+static const struct reg_default wm5102_revb_patch[] = {
+       { 0x80, 0x0003 },
+       { 0x081, 0xE022 },
+       { 0x410, 0x6080 },
+       { 0x418, 0x6080 },
+       { 0x420, 0x6080 },
+       { 0x428, 0xC000 },
+       { 0x441, 0x8014 },
+       { 0x458, 0x000b },
+       { 0x80, 0x0000 },
+};
+
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5102_patch(struct arizona *arizona)
 {
@@ -65,7 +77,9 @@ int wm5102_patch(struct arizona *arizona)
                                             wm5102_reva_patch,
                                             ARRAY_SIZE(wm5102_reva_patch));
        default:
-               return 0;
+               return regmap_register_patch(arizona->regmap,
+                                            wm5102_revb_patch,
+                                            ARRAY_SIZE(wm5102_revb_patch));
        }
 }
 
@@ -291,6 +305,7 @@ static const struct reg_default wm5102_reg_default[] = {
        { 0x000001AA, 0x0004 },   /* R426   - FLL2 GPIO Clock */ 
        { 0x00000200, 0x0006 },   /* R512   - Mic Charge Pump 1 */ 
        { 0x00000210, 0x00D4 },   /* R528   - LDO1 Control 1 */ 
+       { 0x00000212, 0x0001 },   /* R530   - LDO1 Control 2 */
        { 0x00000213, 0x0344 },   /* R531   - LDO2 Control 1 */ 
        { 0x00000218, 0x01A6 },   /* R536   - Mic Bias Ctrl 1 */ 
        { 0x00000219, 0x01A6 },   /* R537   - Mic Bias Ctrl 2 */ 
@@ -1056,6 +1071,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL1_CONTROL_5:
        case ARIZONA_FLL1_CONTROL_6:
        case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+       case ARIZONA_FLL1_NCO_TEST_0:
        case ARIZONA_FLL1_SYNCHRONISER_1:
        case ARIZONA_FLL1_SYNCHRONISER_2:
        case ARIZONA_FLL1_SYNCHRONISER_3:
@@ -1071,6 +1087,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_FLL2_CONTROL_5:
        case ARIZONA_FLL2_CONTROL_6:
        case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+       case ARIZONA_FLL2_NCO_TEST_0:
        case ARIZONA_FLL2_SYNCHRONISER_1:
        case ARIZONA_FLL2_SYNCHRONISER_2:
        case ARIZONA_FLL2_SYNCHRONISER_3:
@@ -1805,6 +1822,7 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
        case ARIZONA_DSP1_CLOCKING_1:
        case ARIZONA_DSP1_STATUS_1:
        case ARIZONA_DSP1_STATUS_2:
+       case ARIZONA_DSP1_STATUS_3:
                return true;
        default:
                return false;
@@ -1813,15 +1831,23 @@ static bool wm5102_readable_register(struct device *dev, unsigned int reg)
 
 static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
 {
+       if (reg > 0xffff)
+               return true;
+
        switch (reg) {
        case ARIZONA_SOFTWARE_RESET:
        case ARIZONA_DEVICE_REVISION:
        case ARIZONA_OUTPUT_STATUS_1:
+       case ARIZONA_RAW_OUTPUT_STATUS_1:
+       case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+       case ARIZONA_SLIMBUS_TX_PORT_STATUS:
        case ARIZONA_SAMPLE_RATE_1_STATUS:
        case ARIZONA_SAMPLE_RATE_2_STATUS:
        case ARIZONA_SAMPLE_RATE_3_STATUS:
        case ARIZONA_HAPTICS_STATUS:
        case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+       case ARIZONA_FLL1_NCO_TEST_0:
+       case ARIZONA_FLL2_NCO_TEST_0:
        case ARIZONA_FX_CTRL2:
        case ARIZONA_INTERRUPT_STATUS_1:
        case ARIZONA_INTERRUPT_STATUS_2:
@@ -1847,6 +1873,7 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
        case ARIZONA_AOD_IRQ_RAW_STATUS:
        case ARIZONA_DSP1_STATUS_1:
        case ARIZONA_DSP1_STATUS_2:
+       case ARIZONA_DSP1_STATUS_3:
        case ARIZONA_HEADPHONE_DETECT_2:
        case ARIZONA_MIC_DETECT_3:
                return true;
@@ -1855,12 +1882,14 @@ static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
        }
 }
 
+#define WM5102_MAX_REGISTER 0x1a8fff
+
 const struct regmap_config wm5102_spi_regmap = {
        .reg_bits = 32,
        .pad_bits = 16,
        .val_bits = 16,
 
-       .max_register = ARIZONA_DSP1_STATUS_2,
+       .max_register = WM5102_MAX_REGISTER,
        .readable_reg = wm5102_readable_register,
        .volatile_reg = wm5102_volatile_register,
 
@@ -1874,7 +1903,7 @@ const struct regmap_config wm5102_i2c_regmap = {
        .reg_bits = 32,
        .val_bits = 16,
 
-       .max_register = ARIZONA_DSP1_STATUS_2,
+       .max_register = WM5102_MAX_REGISTER,
        .readable_reg = wm5102_readable_register,
        .volatile_reg = wm5102_volatile_register,
 
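
The wm5102 patch tables are plain reg_default arrays handed to
regmap_register_patch(), which writes them once up front and replays them
whenever the register cache is synced back to the device, keyed off the
silicon revision read at boot. A minimal sketch of that shape (placeholder
register values; regmap_register_patch() as in the 3.8 regmap API):

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_default demo_reva_patch[] = {
        { 0x80, 0x0003 },       /* placeholder register/value pairs */
        { 0x80, 0x0000 },
};

/* Apply the revision-specific fixups, if any, for this piece of silicon. */
static int demo_patch(struct regmap *map, int rev)
{
        switch (rev) {
        case 0:
                return regmap_register_patch(map, demo_reva_patch,
                                             ARRAY_SIZE(demo_reva_patch));
        default:
                return 0;       /* later revisions need no fixups */
        }
}
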
index bcb226f..57c488d 100644
@@ -535,11 +535,10 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
                        break;
                case 2:
                case 3:
+               default:
                        regmap_patch = wm8994_revc_patch;
                        patch_regs = ARRAY_SIZE(wm8994_revc_patch);
                        break;
-               default:
-                       break;
                }
                break;
 
@@ -558,17 +557,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
                /* Revision C did not change the relevant layer */
                if (wm8994->revision > 1)
                        wm8994->revision++;
-               switch (wm8994->revision) {
-               case 0:
-               case 1:
-               case 2:
-               case 3:
-                       regmap_patch = wm1811_reva_patch;
-                       patch_regs = ARRAY_SIZE(wm1811_reva_patch);
-                       break;
-               default:
-                       break;
-               }
+
+               regmap_patch = wm1811_reva_patch;
+               patch_regs = ARRAY_SIZE(wm1811_reva_patch);
                break;
 
        default:
index b648058..e4e218c 100644
@@ -49,6 +49,8 @@ obj-$(CONFIG_MMC_WMT)         += wmt-sdmmc.o
 
 obj-$(CONFIG_MMC_REALTEK_PCI)  += rtsx_pci_sdmmc.o
 
+obj-$(CONFIG_MMC_REALTEK_PCI)  += rtsx_pci_sdmmc.o
+
 obj-$(CONFIG_MMC_SDHCI_PLTFM)          += sdhci-pltfm.o
 obj-$(CONFIG_MMC_SDHCI_CNS3XXX)                += sdhci-cns3xxx.o
 obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX)      += sdhci-esdhc-imx.o
index 12eff6f..571915d 100644
@@ -21,6 +21,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
@@ -382,8 +383,6 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
                        0xFF, (u8)data->blocks);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H,
                        0xFF, (u8)(data->blocks >> 8));
-       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
-                       CARD_DATA_SOURCE, 0x01, RING_BUFFER);
 
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
                        DMA_DONE_INT, DMA_DONE_INT);
@@ -407,6 +406,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE,
                        0x01, RING_BUFFER);
 
+       rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2);
        rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF,
                        trans_mode | SD_TRANSFER_START);
        rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
index bb0df89..3c5c2e4 100644
@@ -440,8 +440,10 @@ static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
 static irqreturn_t da9052_bat_irq(int irq, void *data)
 {
        struct da9052_battery *bat = data;
+       int virq;
 
-       irq -= bat->da9052->irq_base;
+       virq = regmap_irq_get_virq(bat->da9052->irq_data, irq);
+       irq -= virq;
 
        if (irq == DA9052_IRQ_CHGEND)
                bat->status = POWER_SUPPLY_STATUS_FULL;
@@ -567,7 +569,7 @@ static struct power_supply template_battery = {
        .get_property   = da9052_bat_get_property,
 };
 
-static const char *const da9052_bat_irqs[] = {
+static char *da9052_bat_irqs[] = {
        "BATT TEMP",
        "DCIN DET",
        "DCIN REM",
@@ -576,12 +578,20 @@ static const char *const da9052_bat_irqs[] = {
        "CHG END",
 };
 
+static int da9052_bat_irq_bits[] = {
+       DA9052_IRQ_TBAT,
+       DA9052_IRQ_DCIN,
+       DA9052_IRQ_DCINREM,
+       DA9052_IRQ_VBUS,
+       DA9052_IRQ_VBUSREM,
+       DA9052_IRQ_CHGEND,
+};
+
 static s32 da9052_bat_probe(struct platform_device *pdev)
 {
        struct da9052_pdata *pdata;
        struct da9052_battery *bat;
        int ret;
-       int irq;
        int i;
 
        bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
@@ -602,15 +612,14 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
                bat->psy.use_for_apm = 1;
 
        for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
-               irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
-               ret = request_threaded_irq(bat->da9052->irq_base + irq,
-                                          NULL, da9052_bat_irq,
-                                          IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                          da9052_bat_irqs[i], bat);
+               ret = da9052_request_irq(bat->da9052,
+                               da9052_bat_irq_bits[i], da9052_bat_irqs[i],
+                               da9052_bat_irq, bat);
+
                if (ret != 0) {
                        dev_err(bat->da9052->dev,
-                               "DA9052 failed to request %s IRQ %d: %d\n",
-                               da9052_bat_irqs[i], irq, ret);
+                               "DA9052 failed to request %s IRQ: %d\n",
+                               da9052_bat_irqs[i], ret);
                        goto err;
                }
        }
@@ -623,23 +632,20 @@ static s32 da9052_bat_probe(struct platform_device *pdev)
        return 0;
 
 err:
-       while (--i >= 0) {
-               irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
-               free_irq(bat->da9052->irq_base + irq, bat);
-       }
+       while (--i >= 0)
+               da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
        kfree(bat);
        return ret;
 }
 static int da9052_bat_remove(struct platform_device *pdev)
 {
        int i;
-       int irq;
        struct da9052_battery *bat = platform_get_drvdata(pdev);
 
-       for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
-               irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
-               free_irq(bat->da9052->irq_base + irq, bat);
-       }
+       for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++)
+               da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
+
        power_supply_unregister(&bat->psy);
        kfree(bat);
 
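
The battery driver now requests IRQs by chip-local bit number through the
da9052 MFD core instead of doing its own irq_base arithmetic, and it keeps
the name and bit tables index-aligned so that request and free stay paired.
A consolidated sketch of that pairing, using the names and signatures exactly
as they appear in the hunk above (the wrapper name is hypothetical):

/* Request every battery IRQ, unwinding the ones already granted on error. */
static int da9052_bat_request_irqs_sketch(struct da9052_battery *bat)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
                ret = da9052_request_irq(bat->da9052, da9052_bat_irq_bits[i],
                                         da9052_bat_irqs[i], da9052_bat_irq,
                                         bat);
                if (ret != 0)
                        goto err;
        }
        return 0;

err:
        /* free only the IRQs that were actually requested */
        while (--i >= 0)
                da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
        return ret;
}
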
index 9277d94..8b7464c 100644
@@ -233,7 +233,7 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
  */
 static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-       unsigned char rtc_data[ALL_TIME_REGS + 1];
+       unsigned char rtc_data[ALL_TIME_REGS];
        int ret;
        u8 save_control;
        u8 rtc_control;
@@ -300,15 +300,15 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
 static int twl_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        unsigned char save_control;
-       unsigned char rtc_data[ALL_TIME_REGS + 1];
+       unsigned char rtc_data[ALL_TIME_REGS];
        int ret;
 
-       rtc_data[1] = bin2bcd(tm->tm_sec);
-       rtc_data[2] = bin2bcd(tm->tm_min);
-       rtc_data[3] = bin2bcd(tm->tm_hour);
-       rtc_data[4] = bin2bcd(tm->tm_mday);
-       rtc_data[5] = bin2bcd(tm->tm_mon + 1);
-       rtc_data[6] = bin2bcd(tm->tm_year - 100);
+       rtc_data[0] = bin2bcd(tm->tm_sec);
+       rtc_data[1] = bin2bcd(tm->tm_min);
+       rtc_data[2] = bin2bcd(tm->tm_hour);
+       rtc_data[3] = bin2bcd(tm->tm_mday);
+       rtc_data[4] = bin2bcd(tm->tm_mon + 1);
+       rtc_data[5] = bin2bcd(tm->tm_year - 100);
 
        /* Stop RTC while updating the TC registers */
        ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
@@ -341,7 +341,7 @@ out:
  */
 static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
 {
-       unsigned char rtc_data[ALL_TIME_REGS + 1];
+       unsigned char rtc_data[ALL_TIME_REGS];
        int ret;
 
        ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
@@ -368,19 +368,19 @@ static int twl_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
 
 static int twl_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
 {
-       unsigned char alarm_data[ALL_TIME_REGS + 1];
+       unsigned char alarm_data[ALL_TIME_REGS];
        int ret;
 
        ret = twl_rtc_alarm_irq_enable(dev, 0);
        if (ret)
                goto out;
 
-       alarm_data[1] = bin2bcd(alm->time.tm_sec);
-       alarm_data[2] = bin2bcd(alm->time.tm_min);
-       alarm_data[3] = bin2bcd(alm->time.tm_hour);
-       alarm_data[4] = bin2bcd(alm->time.tm_mday);
-       alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
-       alarm_data[6] = bin2bcd(alm->time.tm_year - 100);
+       alarm_data[0] = bin2bcd(alm->time.tm_sec);
+       alarm_data[1] = bin2bcd(alm->time.tm_min);
+       alarm_data[2] = bin2bcd(alm->time.tm_hour);
+       alarm_data[3] = bin2bcd(alm->time.tm_mday);
+       alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
+       alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
 
        /* update all the alarm registers in one shot */
        ret = twl_i2c_write(TWL_MODULE_RTC, alarm_data,
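
The rtc-twl buffers shrink from ALL_TIME_REGS + 1 to ALL_TIME_REGS and every
index drops by one: the leading dummy byte was a leftover from when the twl
I2C helpers required callers to reserve buf[0] for the register address, a
quirk removed by this window's twl-core cleanup. A sketch of the resulting
zero-based packing (ALL_TIME_REGS is 6 in the driver; the helper name is
hypothetical):

#include <linux/bcd.h>
#include <linux/rtc.h>

/* One BCD byte per TC register, seconds first, so the buffer lines up
 * with the register block written in a single I2C burst.
 */
static void twl_rtc_pack_sketch(unsigned char rtc_data[6],
                                const struct rtc_time *tm)
{
        rtc_data[0] = bin2bcd(tm->tm_sec);
        rtc_data[1] = bin2bcd(tm->tm_min);
        rtc_data[2] = bin2bcd(tm->tm_hour);
        rtc_data[3] = bin2bcd(tm->tm_mday);
        rtc_data[4] = bin2bcd(tm->tm_mon + 1);          /* tm_mon is 0-based */
        rtc_data[5] = bin2bcd(tm->tm_year - 100);       /* years since 2000 */
}
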
index ad1dc14..80f4b84 100644
@@ -1264,8 +1264,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
 {
        struct qla_hw_data *ha = vha->hw;
+       struct se_session *se_sess = sess->se_sess;
        struct qla_tgt_mgmt_cmd *mcmd;
+       struct se_cmd *se_cmd;
+       u32 lun = 0;
        int rc;
+       bool found_lun = false;
+
+       spin_lock(&se_sess->sess_cmd_lock);
+       list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+               struct qla_tgt_cmd *cmd =
+                       container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+               if (cmd->tag == abts->exchange_addr_to_abort) {
+                       lun = cmd->unpacked_lun;
+                       found_lun = true;
+                       break;
+               }
+       }
+       spin_unlock(&se_sess->sess_cmd_lock);
+
+       if (!found_lun)
+               return -ENOENT;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1302,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        mcmd->sess = sess;
        memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
 
-       rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+       rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
            abts->exchange_addr_to_abort);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
index 4372e32..d182c96 100644
@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
                        return;
                }
 
-               cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
-               transport_generic_request_failure(&cmd->se_cmd);
+               transport_generic_request_failure(&cmd->se_cmd,
+                                                 TCM_CHECK_CONDITION_ABORT_CMD);
                return;
        }
 
index 9032e91..f1bf5af 100644
@@ -1418,7 +1418,7 @@ static int scsi_lld_busy(struct request_queue *q)
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
 
-       if (blk_queue_dead(q))
+       if (blk_queue_dying(q))
                return 0;
 
        shost = sdev->host;
index 035c2c7..339f97f 100644
@@ -735,7 +735,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
        list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
                spin_lock(&cmd->istate_lock);
                if ((cmd->i_state == ISTATE_SENT_STATUS) &&
-                   (cmd->stat_sn < exp_statsn)) {
+                   iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
                        cmd->i_state = ISTATE_REMOVE;
                        spin_unlock(&cmd->istate_lock);
                        iscsit_add_cmd_to_immediate_queue(cmd, conn,
@@ -767,9 +767,8 @@ static int iscsit_handle_scsi_cmd(
        struct iscsi_conn *conn,
        unsigned char *buf)
 {
-       int     data_direction, cmdsn_ret = 0, immed_ret, ret, transport_ret;
-       int     dump_immediate_data = 0, send_check_condition = 0, payload_length;
-       struct iscsi_cmd        *cmd = NULL;
+       int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
+       struct iscsi_cmd *cmd = NULL;
        struct iscsi_scsi_req *hdr;
        int iscsi_task_attr;
        int sam_task_attr;
@@ -956,38 +955,26 @@ done:
                " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
                hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
 
-       /*
-        * The CDB is going to an se_device_t.
-        */
-       ret = transport_lookup_cmd_lun(&cmd->se_cmd,
-                                      scsilun_to_int(&hdr->lun));
-       if (ret < 0) {
-               if (cmd->se_cmd.scsi_sense_reason == TCM_NON_EXISTENT_LUN) {
-                       pr_debug("Responding to non-acl'ed,"
-                               " non-existent or non-exported iSCSI LUN:"
-                               " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
+       cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+                                                    scsilun_to_int(&hdr->lun));
+       if (cmd->sense_reason)
+               goto attach_cmd;
+
+       cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
+       if (cmd->sense_reason) {
+               if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+                       return iscsit_add_reject_from_cmd(
+                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+                                       1, 1, buf, cmd);
                }
-               send_check_condition = 1;
+
                goto attach_cmd;
        }
 
-       transport_ret = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
-       if (transport_ret == -ENOMEM) {
+       if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
                return iscsit_add_reject_from_cmd(
-                               ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                               1, 1, buf, cmd);
-       } else if (transport_ret < 0) {
-               /*
-                * Unsupported SAM Opcode.  CHECK_CONDITION will be sent
-                * in iscsit_execute_cmd() during the CmdSN OOO Execution
-                * Mechinism.
-                */
-               send_check_condition = 1;
-       } else {
-               if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0)
-                       return iscsit_add_reject_from_cmd(
-                               ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                               1, 1, buf, cmd);
+                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+                       1, 1, buf, cmd);
        }
 
 attach_cmd:
@@ -1000,11 +987,12 @@ attach_cmd:
         */
        core_alua_check_nonop_delay(&cmd->se_cmd);
 
-       ret = iscsit_allocate_iovecs(cmd);
-       if (ret < 0)
+       if (iscsit_allocate_iovecs(cmd) < 0) {
                return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                                1, 0, buf, cmd);
+       }
+
        /*
         * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
         * the Immediate Bit is not set, and no Immediate
@@ -1031,10 +1019,7 @@ attach_cmd:
         * If no Immediate Data is attached, it's OK to return now.
         */
        if (!cmd->immediate_data) {
-               if (send_check_condition)
-                       return 0;
-
-               if (cmd->unsolicited_data) {
+               if (!cmd->sense_reason && cmd->unsolicited_data) {
                        iscsit_set_dataout_sequence_values(cmd);
 
                        spin_lock_bh(&cmd->dataout_timeout_lock);
@@ -1050,19 +1035,17 @@ attach_cmd:
         * thread.  They are processed in CmdSN order by
         * iscsit_check_received_cmdsn() below.
         */
-       if (send_check_condition) {
+       if (cmd->sense_reason) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-               dump_immediate_data = 1;
                goto after_immediate_data;
        }
        /*
         * Call directly into transport_generic_new_cmd() to perform
         * the backend memory allocation.
         */
-       ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if (ret < 0) {
+       cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
+       if (cmd->sense_reason) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-               dump_immediate_data = 1;
                goto after_immediate_data;
        }
 
@@ -1079,7 +1062,7 @@ after_immediate_data:
                 * Special case for Unsupported SAM WRITE Opcodes
                 * and ImmediateData=Yes.
                 */
-               if (dump_immediate_data) {
+               if (cmd->sense_reason) {
                        if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
                                return -1;
                } else if (cmd->unsolicited_data) {
@@ -1272,8 +1255,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-               if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+               if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -1742,7 +1724,6 @@ static int iscsit_handle_task_mgt_cmd(
                ret = transport_lookup_tmr_lun(&cmd->se_cmd,
                                               scsilun_to_int(&hdr->lun));
                if (ret < 0) {
-                       cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
                        goto attach;
                }
@@ -1751,10 +1732,8 @@ static int iscsit_handle_task_mgt_cmd(
        switch (function) {
        case ISCSI_TM_FUNC_ABORT_TASK:
                se_tmr->response = iscsit_tmr_abort_task(cmd, buf);
-               if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE) {
-                       cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               if (se_tmr->response)
                        goto attach;
-               }
                break;
        case ISCSI_TM_FUNC_ABORT_TASK_SET:
        case ISCSI_TM_FUNC_CLEAR_ACA:
@@ -1763,14 +1742,12 @@ static int iscsit_handle_task_mgt_cmd(
                break;
        case ISCSI_TM_FUNC_TARGET_WARM_RESET:
                if (iscsit_tmr_task_warm_reset(conn, tmr_req, buf) < 0) {
-                       cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
                        goto attach;
                }
                break;
        case ISCSI_TM_FUNC_TARGET_COLD_RESET:
                if (iscsit_tmr_task_cold_reset(conn, tmr_req, buf) < 0) {
-                       cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        se_tmr->response = ISCSI_TMF_RSP_AUTH_FAILED;
                        goto attach;
                }
@@ -1781,7 +1758,7 @@ static int iscsit_handle_task_mgt_cmd(
                 * Perform sanity checks on the ExpDataSN only if the
                 * TASK_REASSIGN was successful.
                 */
-               if (se_tmr->response != ISCSI_TMF_RSP_COMPLETE)
+               if (se_tmr->response)
                        break;
 
                if (iscsit_check_task_reassign_expdatasn(tmr_req, conn) < 0)
@@ -1792,7 +1769,6 @@ static int iscsit_handle_task_mgt_cmd(
        default:
                pr_err("Unknown TMR function: 0x%02x, protocol"
                        " error.\n", function);
-               cmd->se_cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                se_tmr->response = ISCSI_TMF_RSP_NOT_SUPPORTED;
                goto attach;
        }
@@ -2360,7 +2336,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
        if (!conn_p)
                return;
 
-       cmd = iscsit_allocate_cmd(conn_p, GFP_KERNEL);
+       cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
        if (!cmd) {
                iscsit_dec_conn_usage_count(conn_p);
                return;
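
Two of the hunks above, and the iscsit_prepare_cmds_for_realligance one
below, swap plain integer comparisons on StatSN/CmdSN for iscsi_sna_lt() and
iscsi_sna_gte(). iSCSI sequence numbers are 32-bit wrapping counters, so an
ordinary "<" misorders commands once a long-lived session wraps past
0xffffffff; RFC 1982 serial-number arithmetic handles the wrap. A standalone
sketch mirroring the semantics of the in-kernel helpers (the names here are
illustrative):

#include <linux/types.h>

#define DEMO_SNA32_CHECK        2147483648UL    /* 2^31 */

/* Returns nonzero when n1 precedes n2 in 32-bit serial-number space. */
static int demo_sna_lt(u32 n1, u32 n2)
{
        return n1 != n2 && ((n1 < n2 && (n2 - n1 < DEMO_SNA32_CHECK)) ||
                            (n1 > n2 && (n1 - n2 > DEMO_SNA32_CHECK)));
}
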
index 0f03b79..78d75c8 100644
@@ -754,9 +754,33 @@ static ssize_t lio_target_nacl_store_cmdsn_depth(
 
 TF_NACL_BASE_ATTR(lio_target, cmdsn_depth, S_IRUGO | S_IWUSR);
 
+static ssize_t lio_target_nacl_show_tag(
+       struct se_node_acl *se_nacl,
+       char *page)
+{
+       return snprintf(page, PAGE_SIZE, "%s", se_nacl->acl_tag);
+}
+
+static ssize_t lio_target_nacl_store_tag(
+       struct se_node_acl *se_nacl,
+       const char *page,
+       size_t count)
+{
+       int ret;
+
+       ret = core_tpg_set_initiator_node_tag(se_nacl->se_tpg, se_nacl, page);
+
+       if (ret < 0)
+               return ret;
+       return count;
+}
+
+TF_NACL_BASE_ATTR(lio_target, tag, S_IRUGO | S_IWUSR);
+
 static struct configfs_attribute *lio_target_initiator_attrs[] = {
        &lio_target_nacl_info.attr,
        &lio_target_nacl_cmdsn_depth.attr,
+       &lio_target_nacl_tag.attr,
        NULL,
 };
 
@@ -803,7 +827,7 @@ static struct se_node_acl *lio_target_make_nodeacl(
        acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl);
        stats_cg = &se_nacl->acl_fabric_stat_group;
 
-       stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!stats_cg->default_groups) {
                pr_err("Unable to allocate memory for"
@@ -1268,7 +1292,7 @@ static struct se_wwn *lio_target_call_coreaddtiqn(
         */
        stats_cg = &tiqn->tiqn_wwn.fabric_stat_group;
 
-       stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
+       stats_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
                                GFP_KERNEL);
        if (!stats_cg->default_groups) {
                pr_err("Unable to allocate memory for"
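
The two configfs hunks fix an allocation size: default_groups is a
NULL-terminated array of struct config_group pointers, so sizing it by
sizeof(struct config_group) over-allocated considerably, and plain kmalloc()
suffices once every slot, terminator included, is filled explicitly. A small
sketch of the idiom (the group argument and helper name are hypothetical):

#include <linux/configfs.h>
#include <linux/slab.h>

static int demo_alloc_default_groups(struct config_group *cg,
                                     struct config_group *demo_group)
{
        cg->default_groups = kmalloc(2 * sizeof(struct config_group *),
                                     GFP_KERNEL);
        if (!cg->default_groups)
                return -ENOMEM;

        cg->default_groups[0] = demo_group;
        cg->default_groups[1] = NULL;   /* terminator, so kzalloc is unneeded */
        return 0;
}
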
index 21048db..7a333d2 100644
@@ -474,7 +474,7 @@ struct iscsi_cmd {
        struct scatterlist      *first_data_sg;
        u32                     first_data_sg_off;
        u32                     kmapped_nents;
-
+       sense_reason_t          sense_reason;
 }  ____cacheline_aligned;
 
 struct iscsi_tmr_req {
index 21f29d9..0b52a23 100644
@@ -929,11 +929,10 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
        case ISCSI_OP_SCSI_CMD:
                /*
                 * Go ahead and send the CHECK_CONDITION status for
-                * any SCSI CDB exceptions that may have occurred, also
-                * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
+                * any SCSI CDB exceptions that may have occurred.
                 */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
+               if (cmd->sense_reason) {
+                       if (cmd->sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
@@ -956,7 +955,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                         * exception
                         */
                        return transport_send_check_condition_and_sense(se_cmd,
-                                       se_cmd->scsi_sense_reason, 0);
+                                       cmd->sense_reason, 0);
                }
                /*
                 * Special case for delayed CmdSN with Immediate
@@ -1013,7 +1012,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
-               if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
+               if (cmd->se_cmd.se_tmr_req->response) {
                        spin_unlock_bh(&cmd->istate_lock);
                        iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
                                        cmd->i_state);
index 17d8c20..9ac4c15 100644
@@ -372,7 +372,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
                 * made generic here.
                 */
                if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
-                    (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {
+                    iscsi_sna_gte(cmd->stat_sn, conn->sess->exp_cmd_sn)) {
                        list_del(&cmd->i_conn_node);
                        spin_unlock_bh(&conn->cmd_lock);
                        iscsit_free_cmd(cmd);
index f8dbec0..fdb632f 100644
@@ -127,13 +127,13 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 
        initiatorname_param = iscsi_find_param_from_key(
                        INITIATORNAME, conn->param_list);
-       if (!initiatorname_param)
-               return -1;
-
        sessiontype_param = iscsi_find_param_from_key(
                        SESSIONTYPE, conn->param_list);
-       if (!sessiontype_param)
+       if (!initiatorname_param || !sessiontype_param) {
+               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+                       ISCSI_LOGIN_STATUS_MISSING_FIELDS);
                return -1;
+       }
 
        sessiontype = (strncmp(sessiontype_param->value, NORMAL, 6)) ? 1 : 0;
 
@@ -254,9 +254,9 @@ static int iscsi_login_zero_tsih_s1(
                kfree(sess);
                return -ENOMEM;
        }
-       spin_lock(&sess_idr_lock);
+       spin_lock_bh(&sess_idr_lock);
        ret = idr_get_new(&sess_idr, NULL, &sess->session_index);
-       spin_unlock(&sess_idr_lock);
+       spin_unlock_bh(&sess_idr_lock);
 
        if (ret < 0) {
                pr_err("idr_get_new() for sess_idr failed\n");
@@ -1118,10 +1118,8 @@ new_sess_out:
                idr_remove(&sess_idr, conn->sess->session_index);
                spin_unlock_bh(&sess_idr_lock);
        }
-       if (conn->sess->sess_ops)
-               kfree(conn->sess->sess_ops);
-       if (conn->sess)
-               kfree(conn->sess);
+       kfree(conn->sess->sess_ops);
+       kfree(conn->sess);
 old_sess_out:
        iscsi_stop_login_thread_timer(np);
        /*
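
kfree(), like userspace free(), is specified to do nothing when handed NULL, so the removed "if (...)" guards were pure noise; the bare two-line form is the idiomatic kernel style. A trivial userspace analogue:

#include <stdlib.h>

int main(void)
{
	char *p = NULL;

	/* free(NULL) is defined to do nothing (C99 7.20.3.2), and
	 * kfree() gives the same guarantee in the kernel, so
	 * "if (p) free(p);" is always redundant. */
	free(p);
	return 0;
}
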
index e9053a0..9d902ae 100644 (file)
@@ -620,8 +620,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
                        login->req_buf,
                        payload_length,
                        conn);
-       if (ret < 0)
+       if (ret < 0) {
+               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+                               ISCSI_LOGIN_STATUS_INIT_ERR);
                return -1;
+       }
 
        if (login->first_request)
                if (iscsi_target_check_first_request(conn, login) < 0)
@@ -636,8 +639,11 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log
                        login->rsp_buf,
                        &login->rsp_length,
                        conn->param_list);
-       if (ret < 0)
+       if (ret < 0) {
+               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
+                               ISCSI_LOGIN_STATUS_INIT_ERR);
                return -1;
+       }
 
        if (!login->auth_complete &&
             ISCSI_TPG_ATTRIB(ISCSI_TPG_C(conn))->authentication) {
index 1bf7432..d891642 100644 (file)
@@ -154,22 +154,18 @@ static struct iscsi_param *iscsi_set_default_param(struct iscsi_param_list *para
        }
        INIT_LIST_HEAD(&param->p_list);
 
-       param->name = kzalloc(strlen(name) + 1, GFP_KERNEL);
+       param->name = kstrdup(name, GFP_KERNEL);
        if (!param->name) {
                pr_err("Unable to allocate memory for parameter name.\n");
                goto out;
        }
 
-       param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+       param->value = kstrdup(value, GFP_KERNEL);
        if (!param->value) {
                pr_err("Unable to allocate memory for parameter value.\n");
                goto out;
        }
 
-       memcpy(param->name, name, strlen(name));
-       param->name[strlen(name)] = '\0';
-       memcpy(param->value, value, strlen(value));
-       param->value[strlen(value)] = '\0';
        param->phase            = phase;
        param->scope            = scope;
        param->sender           = sender;
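
kstrdup() collapses the allocate/copy/terminate triple into a single call and removes the strlen()+1 bookkeeping that invites off-by-one bugs. What it does is essentially the following userspace sketch (strdup() is the direct libc analogue; xstrdup here is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* What kstrdup(s, GFP_KERNEL) does, minus the GFP flags:
 * allocate strlen(s) + 1 bytes and copy including the NUL. */
static char *xstrdup(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = malloc(len);

	if (p)
		memcpy(p, s, len);
	return p;
}

int main(void)
{
	char *name = xstrdup("InitiatorName");

	if (!name)
		return 1;
	printf("%s\n", name);
	free(name);
	return 0;
}
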
@@ -635,11 +631,8 @@ void iscsi_release_param_list(struct iscsi_param_list *param_list)
                list_del(&param->p_list);
 
                kfree(param->name);
-               param->name = NULL;
                kfree(param->value);
-               param->value = NULL;
                kfree(param);
-               param = NULL;
        }
 
        iscsi_release_extra_responses(param_list);
@@ -687,15 +680,12 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
 {
        kfree(param->value);
 
-       param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
+       param->value = kstrdup(value, GFP_KERNEL);
        if (!param->value) {
                pr_err("Unable to allocate memory for value.\n");
                return -ENOMEM;
        }
 
-       memcpy(param->value, value, strlen(value));
-       param->value[strlen(value)] = '\0';
-
        pr_debug("iSCSI Parameter updated to %s=%s\n",
                        param->name, param->value);
        return 0;
index 4a99820..9d4417a 100644 (file)
@@ -50,8 +50,8 @@ u8 iscsit_tmr_abort_task(
        if (!ref_cmd) {
                pr_err("Unable to locate RefTaskTag: 0x%08x on CID:"
                        " %hu.\n", hdr->rtt, conn->cid);
-               return (be32_to_cpu(hdr->refcmdsn) >= conn->sess->exp_cmd_sn &&
-                       be32_to_cpu(hdr->refcmdsn) <= conn->sess->max_cmd_sn) ?
+               return (iscsi_sna_gte(be32_to_cpu(hdr->refcmdsn), conn->sess->exp_cmd_sn) &&
+                       iscsi_sna_lte(be32_to_cpu(hdr->refcmdsn), conn->sess->max_cmd_sn)) ?
                        ISCSI_TMF_RSP_COMPLETE : ISCSI_TMF_RSP_NO_TASK;
        }
        if (ref_cmd->cmd_sn != be32_to_cpu(hdr->refcmdsn)) {
index 9d881a0..8128952 100644 (file)
@@ -66,8 +66,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
                return NULL;
        }
 
-       list_for_each_entry(ts, &inactive_ts_list, ts_list)
-               break;
+       ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
        list_del(&ts->ts_list);
        iscsit_global->inactive_ts--;
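
list_for_each_entry() followed by an immediate break is a roundabout way of taking the head element; list_first_entry() states the intent directly, with the same precondition that the list is known non-empty (checked just above each converted call site). A self-contained userspace sketch of the intrusive-list idiom, with simplified versions of the kernel's container_of and list_first_entry macros:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* First element of a non-empty list: the entry embedding head->next. */
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct thread_set {
	int id;
	struct list_head ts_list;
};

int main(void)
{
	struct thread_set a = { .id = 42 };
	struct list_head inactive = { &a.ts_list, &a.ts_list };

	a.ts_list.next = a.ts_list.prev = &inactive;

	struct thread_set *ts =
		list_first_entry(&inactive, struct thread_set, ts_list);
	assert(ts->id == 42);
	printf("first: %d\n", ts->id);
	return 0;
}
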
index 69e0cfd..7ce3505 100644 (file)
@@ -500,8 +500,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *c
                spin_unlock_bh(&conn->immed_queue_lock);
                return NULL;
        }
-       list_for_each_entry(qr, &conn->immed_queue_list, qr_list)
-               break;
+       qr = list_first_entry(&conn->immed_queue_list,
+                             struct iscsi_queue_req, qr_list);
 
        list_del(&qr->qr_list);
        if (qr->cmd)
@@ -575,8 +575,8 @@ struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *co
                return NULL;
        }
 
-       list_for_each_entry(qr, &conn->response_queue_list, qr_list)
-               break;
+       qr = list_first_entry(&conn->response_queue_list,
+                             struct iscsi_queue_req, qr_list);
 
        list_del(&qr->qr_list);
        if (qr->cmd)
index 7b54893..dd7a84e 100644 (file)
@@ -53,7 +53,6 @@ struct tcm_loop_hba {
        struct se_hba_s *se_hba;
        struct se_lun *tl_hba_lun;
        struct se_port *tl_hba_lun_sep;
-       struct se_device_s *se_dev_hba_ptr;
        struct tcm_loop_nexus *tl_nexus;
        struct device dev;
        struct Scsi_Host *sh;
index 132da54..1614bc7 100644 (file)
@@ -1,6 +1,6 @@
 config SBP_TARGET
        tristate "FireWire SBP-2 fabric module"
-       depends on FIREWIRE && EXPERIMENTAL
+       depends on FIREWIRE
        help
          Say Y or M here to enable SCSI target functionality over FireWire.
          This enables you to expose SCSI devices to other nodes on the FireWire
index 0d6d7c1..2e8d06f 100644 (file)
@@ -704,16 +704,17 @@ static void session_maintenance_work(struct work_struct *work)
 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
                struct sbp_target_agent *agent)
 {
-       __be32 state;
+       int state;
 
        switch (tcode) {
        case TCODE_READ_QUADLET_REQUEST:
                pr_debug("tgt_agent AGENT_STATE READ\n");
 
                spin_lock_bh(&agent->lock);
-               state = cpu_to_be32(agent->state);
+               state = agent->state;
                spin_unlock_bh(&agent->lock);
-               memcpy(data, &state, sizeof(state));
+
+               *(__be32 *)data = cpu_to_be32(state);
 
                return RCODE_COMPLETE;
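
Keeping agent->state in host byte order while under the lock and converting exactly once at the point of publication makes the __be32 boundary visible to sparse; the direct store assumes the payload buffer is at least 4-byte aligned, which holds for quadlet requests. A userspace sketch of the same convert-once-at-the-boundary discipline, using htonl() in place of cpu_to_be32():

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t state = 0x01020304;   /* host order while we compute */
	unsigned char wire[4];
	uint32_t be;

	/* Convert exactly once, at the boundary to the wire format. */
	be = htonl(state);
	memcpy(wire, &be, sizeof(be));

	assert(wire[0] == 0x01 && wire[3] == 0x04);  /* big-endian layout */
	return 0;
}
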
 
@@ -2207,20 +2208,23 @@ static struct se_portal_group *sbp_make_tpg(
        tport->mgt_agt = sbp_management_agent_register(tport);
        if (IS_ERR(tport->mgt_agt)) {
                ret = PTR_ERR(tport->mgt_agt);
-               kfree(tpg);
-               return ERR_PTR(ret);
+               goto out_free_tpg;
        }
 
        ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
                        &tpg->se_tpg, (void *)tpg,
                        TRANSPORT_TPG_TYPE_NORMAL);
-       if (ret < 0) {
-               sbp_management_agent_unregister(tport->mgt_agt);
-               kfree(tpg);
-               return ERR_PTR(ret);
-       }
+       if (ret < 0)
+               goto out_unreg_mgt_agt;
 
        return &tpg->se_tpg;
+
+out_unreg_mgt_agt:
+       sbp_management_agent_unregister(tport->mgt_agt);
+out_free_tpg:
+       tport->tpg = NULL;
+       kfree(tpg);
+       return ERR_PTR(ret);
 }
 
 static void sbp_drop_tpg(struct se_portal_group *se_tpg)
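
The duplicated cleanup in sbp_make_tpg() becomes goto-based unwinding: a single exit path with labels in reverse order of acquisition, so a future setup step costs one new label rather than another copy of the teardown (and the tport->tpg = NULL reset now happens on every failure path). A compact userspace sketch of the pattern, with illustrative resource names:

#include <stdio.h>
#include <stdlib.h>

static int make_object(void)
{
	char *tpg = NULL, *agent = NULL;
	int ret;

	tpg = malloc(64);
	if (!tpg)
		return -1;

	agent = malloc(64);
	if (!agent) {
		ret = -1;
		goto out_free_tpg;
	}

	if (0 /* pretend a registration step failed */) {
		ret = -1;
		goto out_free_agent;
	}
	printf("ok\n");
	return 0;

	/* Unwind in reverse order of acquisition. */
out_free_agent:
	free(agent);
out_free_tpg:
	free(tpg);
	return ret;
}

int main(void) { return make_object() ? 1 : 0; }
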
index 9a5f9a7..85140f7 100644 (file)
@@ -3,8 +3,7 @@
  *
  * This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
  *
- * Copyright (c) 2009-2010 Rising Tide Systems
- * Copyright (c) 2009-2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -41,7 +40,7 @@
 #include "target_core_alua.h"
 #include "target_core_ua.h"
 
-static int core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int *primary);
 static int core_alua_set_tg_pt_secondary_state(
                struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
                struct se_port *port, int explict, int offline);
@@ -59,15 +58,17 @@ struct t10_alua_lu_gp *default_lu_gp;
  *
  * See spc4r17 section 6.27
  */
-int target_emulate_report_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-       struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+       struct se_device *dev = cmd->se_dev;
        struct se_port *port;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char *buf;
        u32 rd_len = 0, off;
        int ext_hdr = (cmd->t_task_cdb[1] & 0x20);
+
        /*
         * Skip over RESERVED area to first Target port group descriptor
         * depending on the PARAMETER DATA FORMAT type..
@@ -81,13 +82,14 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
                pr_warn("REPORT TARGET PORT GROUPS allocation length %u too"
                        " small for %s header\n", cmd->data_length,
                        (ext_hdr) ? "extended" : "normal");
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-       list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+       list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                /*
                 * Check if the Target port group and Target port descriptor list
@@ -160,7 +162,7 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
                }
                spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
        }
-       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        /*
         * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
         */
@@ -200,32 +202,33 @@ int target_emulate_report_target_port_groups(struct se_cmd *cmd)
  *
  * See spc4r17 section 6.35
  */
-int target_emulate_set_target_port_groups(struct se_cmd *cmd)
+sense_reason_t
+target_emulate_set_target_port_groups(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct se_port *port, *l_port = cmd->se_lun->lun_sep;
        struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
        unsigned char *buf;
        unsigned char *ptr;
+       sense_reason_t rc;
        u32 len = 4; /* Skip over RESERVED area in header */
-       int alua_access_state, primary = 0, rc;
+       int alua_access_state, primary = 0;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
-       }
+       if (!l_port)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        if (cmd->data_length < 4) {
                pr_warn("SET TARGET PORT GROUPS parameter list length %u too"
                        " small\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
         * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -234,8 +237,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               rc = -EINVAL;
+               rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -243,24 +245,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               rc = -EINVAL;
+               rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
-       rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
        spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
-       if (!rc) {
+       if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               rc = -EINVAL;
+               rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                goto out;
        }
 
        ptr = &buf[4]; /* Skip over RESERVED area in header */
 
        while (len < cmd->data_length) {
+               bool found = false;
                alua_access_state = (ptr[0] & 0x0f);
                /*
                 * Check the received ALUA access state, and determine if
@@ -268,7 +268,7 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
                 * access state.
                 */
                rc = core_alua_check_transition(alua_access_state, &primary);
-               if (rc != 0) {
+               if (rc) {
                        /*
                         * If the SET TARGET PORT GROUPS attempts to establish
                         * an invalid combination of target port asymmetric
@@ -279,11 +279,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       rc = -EINVAL;
                        goto out;
                }
-               rc = -1;
+
                /*
                 * If the ASYMMETRIC ACCESS STATE field (see table 267)
                 * specifies a primary target port asymmetric access state,
@@ -303,9 +301,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
                         * Locate the matching target port group ID from
                         * the global tg_pt_gp list
                         */
-                       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        list_for_each_entry(tg_pt_gp,
-                                       &su_dev->t10_alua.tg_pt_gps_list,
+                                       &dev->t10_alua.tg_pt_gps_list,
                                        tg_pt_gp_list) {
                                if (!tg_pt_gp->tg_pt_gp_valid_id)
                                        continue;
@@ -315,27 +313,20 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
 
                                atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic_inc();
-                               spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 
-                               rc = core_alua_do_port_transition(tg_pt_gp,
+                               spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+                               if (!core_alua_do_port_transition(tg_pt_gp,
                                                dev, l_port, nacl,
-                                               alua_access_state, 1);
+                                               alua_access_state, 1))
+                                       found = true;
 
-                               spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+                               spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                                atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
                                smp_mb__after_atomic_dec();
                                break;
                        }
-                       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
-                       /*
-                        * If not matching target port group ID can be located
-                        * throw an exception with ASCQ: INVALID_PARAMETER_LIST
-                        */
-                       if (rc != 0) {
-                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                               rc = -EINVAL;
-                               goto out;
-                       }
+                       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                } else {
                        /*
                         * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -354,25 +345,22 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
                                        continue;
 
                                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+
                                spin_unlock(&dev->se_port_lock);
 
-                               rc = core_alua_set_tg_pt_secondary_state(
-                                               tg_pt_gp_mem, port, 1, 1);
+                               if (!core_alua_set_tg_pt_secondary_state(
+                                               tg_pt_gp_mem, port, 1, 1))
+                                       found = true;
 
                                spin_lock(&dev->se_port_lock);
                                break;
                        }
                        spin_unlock(&dev->se_port_lock);
-                       /*
-                        * If not matching relative target port identifier can
-                        * be located, throw an exception with ASCQ:
-                        * INVALID_PARAMETER_LIST
-                        */
-                       if (rc != 0) {
-                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                               rc = -EINVAL;
-                               goto out;
-                       }
+               }
+
+               if (!found) {
+                       rc = TCM_INVALID_PARAMETER_LIST;
+                       goto out;
                }
 
                ptr += 4;
@@ -523,40 +511,27 @@ static inline int core_alua_state_transition(
 }
 
 /*
- * Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
- * in transport_cmd_sequencer().  This function is assigned to
- * struct t10_alua *->state_check() in core_setup_alua()
- */
-static int core_alua_state_check_nop(
-       struct se_cmd *cmd,
-       unsigned char *cdb,
-       u8 *alua_ascq)
-{
-       return 0;
-}
-
-/*
- * Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
- * This function is assigned to struct t10_alua *->state_check() in
- * core_setup_alua()
- *
- * Also, this function can return three different return codes to
- * signal transport_generic_cmd_sequencer()
- *
  * return 1: used to signal LUN not accessible, and check condition/not ready
  * return 0: used to signal success
  * return -1: used to signal failure, and invalid cdb field
  */
-static int core_alua_state_check(
-       struct se_cmd *cmd,
-       unsigned char *cdb,
-       u8 *alua_ascq)
+sense_reason_t
+target_alua_state_check(struct se_cmd *cmd)
 {
+       struct se_device *dev = cmd->se_dev;
+       unsigned char *cdb = cmd->t_task_cdb;
        struct se_lun *lun = cmd->se_lun;
        struct se_port *port = lun->lun_sep;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        int out_alua_state, nonop_delay_msecs;
+       u8 alua_ascq;
+       int ret;
+
+       if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+               return 0;
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return 0;
 
        if (!port)
                return 0;
@@ -565,11 +540,11 @@ static int core_alua_state_check(
         * access state: OFFLINE
         */
        if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
-               *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
-               *alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-               return 1;
+               alua_ascq = ASCQ_04H_ALUA_OFFLINE;
+               ret = 1;
+               goto out;
        }
         /*
         * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -594,14 +569,18 @@ static int core_alua_state_check(
 
        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-               return core_alua_state_nonoptimized(cmd, cdb,
-                                       nonop_delay_msecs, alua_ascq);
+               ret = core_alua_state_nonoptimized(cmd, cdb,
+                                       nonop_delay_msecs, &alua_ascq);
+               break;
        case ALUA_ACCESS_STATE_STANDBY:
-               return core_alua_state_standby(cmd, cdb, alua_ascq);
+               ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+               break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
-               return core_alua_state_unavailable(cmd, cdb, alua_ascq);
+               ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+               break;
        case ALUA_ACCESS_STATE_TRANSITION:
-               return core_alua_state_transition(cmd, cdb, alua_ascq);
+               ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+               break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
         * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -610,7 +589,24 @@ static int core_alua_state_check(
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n",
                                out_alua_state);
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+out:
+       if (ret > 0) {
+               /*
+                * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+                * The ALUA additional sense code qualifier (ASCQ) is determined
+                * by the ALUA primary or secondary access state..
+                */
+               pr_debug("[%s]: ALUA TG Port not available, "
+                       "SenseKey: NOT_READY, ASC/ASCQ: "
+                       "0x04/0x%02x\n",
+                       cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+               cmd->scsi_asc = 0x04;
+               cmd->scsi_ascq = alua_ascq;
+               return TCM_CHECK_CONDITION_NOT_READY;
        }
 
        return 0;
@@ -619,7 +615,8 @@ static int core_alua_state_check(
 /*
  * Check implicit and explicit ALUA state change requests.
  */
-static int core_alua_check_transition(int state, int *primary)
+static sense_reason_t
+core_alua_check_transition(int state, int *primary)
 {
        switch (state) {
        case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
@@ -641,7 +638,7 @@ static int core_alua_check_transition(int state, int *primary)
                break;
        default:
                pr_err("Unknown ALUA access state: 0x%02x\n", state);
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
 
        return 0;
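
This is part of a wider conversion: the ALUA emulation paths now return a typed sense_reason_t (zero meaning success) instead of stashing TCM_* codes in cmd->scsi_sense_reason and returning -EINVAL, so callers simply propagate the return value. A minimal userspace sketch of the calling convention (the typedef and values below are placeholders, not the kernel's definitions):

#include <stdio.h>

/* 0 == success; any non-zero value names the check condition to send. */
typedef enum {
	TCM_NO_SENSE = 0,
	TCM_INVALID_PARAMETER_LIST,
	TCM_UNSUPPORTED_SCSI_OPCODE,
} sense_reason_t;

static sense_reason_t check_transition(int state)
{
	if (state < 0 || state > 4)
		return TCM_INVALID_PARAMETER_LIST;
	return TCM_NO_SENSE;
}

static sense_reason_t set_target_port_groups(int state)
{
	sense_reason_t rc = check_transition(state);

	if (rc)
		return rc;	/* propagate; no side channel needed */
	return TCM_NO_SENSE;
}

int main(void)
{
	printf("%d\n", set_target_port_groups(9));  /* prints 1 */
	return 0;
}
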
@@ -758,8 +755,7 @@ static int core_alua_update_tpg_primary_metadata(
        int primary_state,
        unsigned char *md_buf)
 {
-       struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
-       struct t10_wwn *wwn = &su_dev->t10_wwn;
+       struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
        char path[ALUA_METADATA_PATH_LEN];
        int len;
 
@@ -899,7 +895,6 @@ int core_alua_do_port_transition(
 {
        struct se_device *dev;
        struct se_port *port;
-       struct se_subsystem_dev *su_dev;
        struct se_node_acl *nacl;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
@@ -949,14 +944,13 @@ int core_alua_do_port_transition(
                                lu_gp_mem_list) {
 
                dev = lu_gp_mem->lu_gp_mem_dev;
-               su_dev = dev->se_sub_dev;
                atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
                smp_mb__after_atomic_inc();
                spin_unlock(&lu_gp->lu_gp_lock);
 
-               spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+               spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                list_for_each_entry(tg_pt_gp,
-                               &su_dev->t10_alua.tg_pt_gps_list,
+                               &dev->t10_alua.tg_pt_gps_list,
                                tg_pt_gp_list) {
 
                        if (!tg_pt_gp->tg_pt_gp_valid_id)
@@ -981,7 +975,7 @@ int core_alua_do_port_transition(
                        }
                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        smp_mb__after_atomic_inc();
-                       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        /*
                         * core_alua_do_transition_tg_pt() will always return
                         * success.
@@ -989,11 +983,11 @@ int core_alua_do_port_transition(
                        core_alua_do_transition_tg_pt(tg_pt_gp, port,
                                        nacl, md_buf, new_state, explict);
 
-                       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
                        smp_mb__after_atomic_dec();
                }
-               spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+               spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
                spin_lock(&lu_gp->lu_gp_lock);
                atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -1268,14 +1262,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
 
 void core_alua_free_lu_gp_mem(struct se_device *dev)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-       struct t10_alua *alua = &su_dev->t10_alua;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem;
 
-       if (alua->alua_type != SPC3_ALUA_EMULATED)
-               return;
-
        lu_gp_mem = dev->dev_alua_lu_gp_mem;
        if (!lu_gp_mem)
                return;
@@ -1358,10 +1347,8 @@ void __core_alua_drop_lu_gp_mem(
        spin_unlock(&lu_gp->lu_gp_lock);
 }
 
-struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-       struct se_subsystem_dev *su_dev,
-       const char *name,
-       int def_group)
+struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
+               const char *name, int def_group)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
 
@@ -1375,7 +1362,7 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
        mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       tg_pt_gp->tg_pt_gp_su_dev = su_dev;
+       tg_pt_gp->tg_pt_gp_dev = dev;
        tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
        atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
                ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
@@ -1392,14 +1379,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
        tg_pt_gp->tg_pt_gp_implict_trans_secs = ALUA_DEFAULT_IMPLICT_TRANS_SECS;
 
        if (def_group) {
-               spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+               spin_lock(&dev->t10_alua.tg_pt_gps_lock);
                tg_pt_gp->tg_pt_gp_id =
-                               su_dev->t10_alua.alua_tg_pt_gps_counter++;
+                               dev->t10_alua.alua_tg_pt_gps_counter++;
                tg_pt_gp->tg_pt_gp_valid_id = 1;
-               su_dev->t10_alua.alua_tg_pt_gps_count++;
+               dev->t10_alua.alua_tg_pt_gps_count++;
                list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-                             &su_dev->t10_alua.tg_pt_gps_list);
-               spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+                             &dev->t10_alua.tg_pt_gps_list);
+               spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
        }
 
        return tg_pt_gp;
@@ -1409,9 +1396,10 @@ int core_alua_set_tg_pt_gp_id(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        u16 tg_pt_gp_id)
 {
-       struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
        u16 tg_pt_gp_id_tmp;
+
        /*
         * The tg_pt_gp->tg_pt_gp_id may only be set once..
         */
@@ -1421,19 +1409,19 @@ int core_alua_set_tg_pt_gp_id(
                return -EINVAL;
        }
 
-       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-       if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+       if (dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
                pr_err("Maximum ALUA alua_tg_pt_gps_count:"
                        " 0x0000ffff reached\n");
-               spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+               spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
                return -ENOSPC;
        }
 again:
        tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
-                       su_dev->t10_alua.alua_tg_pt_gps_counter++;
+                       dev->t10_alua.alua_tg_pt_gps_counter++;
 
-       list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
+       list_for_each_entry(tg_pt_gp_tmp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
                        if (!tg_pt_gp_id)
@@ -1441,7 +1429,7 @@ again:
 
                        pr_err("ALUA Target Port Group ID: %hu already"
                                " exists, ignoring request\n", tg_pt_gp_id);
-                       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        return -EINVAL;
                }
        }
@@ -1449,9 +1437,9 @@ again:
        tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
        tg_pt_gp->tg_pt_gp_valid_id = 1;
        list_add_tail(&tg_pt_gp->tg_pt_gp_list,
-                       &su_dev->t10_alua.tg_pt_gps_list);
-       su_dev->t10_alua.alua_tg_pt_gps_count++;
-       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       &dev->t10_alua.tg_pt_gps_list);
+       dev->t10_alua.alua_tg_pt_gps_count++;
+       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
        return 0;
 }
@@ -1480,8 +1468,9 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 void core_alua_free_tg_pt_gp(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-       struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
+
        /*
         * Once we have reached this point, config_item_put() has already
         * been called from target_core_alua_drop_tg_pt_gp().
@@ -1490,10 +1479,11 @@ void core_alua_free_tg_pt_gp(
         * no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
         * can be made while we are releasing struct t10_alua_tg_pt_gp.
         */
-       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        list_del(&tg_pt_gp->tg_pt_gp_list);
-       su_dev->t10_alua.alua_tg_pt_gps_counter--;
-       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+       dev->t10_alua.alua_tg_pt_gps_counter--;
+       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
         * core_alua_get_tg_pt_gp_by_name() in
@@ -1502,6 +1492,7 @@ void core_alua_free_tg_pt_gp(
         */
        while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
                cpu_relax();
+
        /*
         * Release reference to struct t10_alua_tg_pt_gp from all associated
         * struct se_port.
@@ -1525,9 +1516,9 @@ void core_alua_free_tg_pt_gp(
                 * default_tg_pt_gp.
                 */
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-               if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
+               if (tg_pt_gp != dev->t10_alua.default_tg_pt_gp) {
                        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                                       su_dev->t10_alua.default_tg_pt_gp);
+                                       dev->t10_alua.default_tg_pt_gp);
                } else
                        tg_pt_gp_mem->tg_pt_gp = NULL;
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1541,14 +1532,9 @@ void core_alua_free_tg_pt_gp(
 
 void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 {
-       struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
-       struct t10_alua *alua = &su_dev->t10_alua;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 
-       if (alua->alua_type != SPC3_ALUA_EMULATED)
-               return;
-
        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
        if (!tg_pt_gp_mem)
                return;
@@ -1574,25 +1560,24 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
 }
 
 static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
-       struct se_subsystem_dev *su_dev,
-       const char *name)
+               struct se_device *dev, const char *name)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct config_item *ci;
 
-       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
-       list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
+       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+       list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
                        tg_pt_gp_list) {
                if (!tg_pt_gp->tg_pt_gp_valid_id)
                        continue;
                ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
                if (!strcmp(config_item_name(ci), name)) {
                        atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-                       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+                       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
                        return tg_pt_gp;
                }
        }
-       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
        return NULL;
 }
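
The lookup bumps tg_pt_gp_ref_cnt before dropping tg_pt_gps_lock, so the group cannot be torn down between being found and being used; core_alua_put_tg_pt_gp_from_name() is the matching drop, and core_alua_free_tg_pt_gp() above spins until the count drains. A userspace sketch of the get-under-lock idiom using C11 atomics (simplified; the kernel pairs atomic_inc() with the list spinlock):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct group {
	const char *name;
	atomic_int refcnt;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct group groups[] = { { "default_tg_pt_gp", 0 } };

static struct group *get_by_name(const char *name)
{
	struct group *g = NULL;

	pthread_mutex_lock(&list_lock);
	for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		if (!strcmp(groups[i].name, name)) {
			/* Take the reference before dropping the lock,
			 * so nobody can free the group underneath us. */
			atomic_fetch_add(&groups[i].refcnt, 1);
			g = &groups[i];
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return g;
}

static void put_group(struct group *g)
{
	atomic_fetch_sub(&g->refcnt, 1);
}

int main(void)
{
	struct group *g = get_by_name("default_tg_pt_gp");

	if (g) {
		printf("got %s\n", g->name);
		put_group(g);
	}
	return 0;
}
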
@@ -1600,11 +1585,11 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
 static void core_alua_put_tg_pt_gp_from_name(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
-       struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 
-       spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
        atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 }
 
 /*
@@ -1640,16 +1625,11 @@ static void __core_alua_drop_tg_pt_gp_mem(
 
 ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
 {
-       struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
        struct config_item *tg_pt_ci;
-       struct t10_alua *alua = &su_dev->t10_alua;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        ssize_t len = 0;
 
-       if (alua->alua_type != SPC3_ALUA_EMULATED)
-               return len;
-
        tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
        if (!tg_pt_gp_mem)
                return len;
@@ -1683,7 +1663,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 {
        struct se_portal_group *tpg;
        struct se_lun *lun;
-       struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
+       struct se_device *dev = port->sep_lun->lun_se_dev;
        struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        unsigned char buf[TG_PT_GROUP_NAME_BUF];
@@ -1692,13 +1672,9 @@ ssize_t core_alua_store_tg_pt_gp_info(
        tpg = port->sep_tpg;
        lun = port->sep_lun;
 
-       if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
-               pr_warn("SPC3_ALUA_EMULATED not enabled for"
-                       " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
-                       tpg->se_tpg_tfo->tpg_get_tag(tpg),
-                       config_item_name(&lun->lun_group.cg_item));
-               return -EINVAL;
-       }
+       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
+       if (!tg_pt_gp_mem)
+               return 0;
 
        if (count > TG_PT_GROUP_NAME_BUF) {
                pr_err("ALUA Target Port Group alias too large!\n");
@@ -1716,18 +1692,11 @@ ssize_t core_alua_store_tg_pt_gp_info(
                 * struct t10_alua_tg_pt_gp.  This reference is released with
                 * core_alua_put_tg_pt_gp_from_name() below.
                 */
-               tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
+               tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev,
                                        strstrip(buf));
                if (!tg_pt_gp_new)
                        return -ENODEV;
        }
-       tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-       if (!tg_pt_gp_mem) {
-               if (tg_pt_gp_new)
-                       core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
-               pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
-               return -EINVAL;
-       }
 
        spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
        tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
@@ -1750,7 +1719,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
 
                        __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
                        __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                                       su_dev->t10_alua.default_tg_pt_gp);
+                                       dev->t10_alua.default_tg_pt_gp);
                        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
                        return count;
@@ -2054,32 +2023,12 @@ ssize_t core_alua_store_secondary_write_metadata(
        return count;
 }
 
-int core_setup_alua(struct se_device *dev, int force_pt)
+int core_setup_alua(struct se_device *dev)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-       struct t10_alua *alua = &su_dev->t10_alua;
-       struct t10_alua_lu_gp_member *lu_gp_mem;
-       /*
-        * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
-        * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
-        * cause a problem because libata and some SATA RAID HBAs appear
-        * under Linux/SCSI, but emulate SCSI logic themselves.
-        */
-       if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-           !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
-               alua->alua_type = SPC_ALUA_PASSTHROUGH;
-               alua->alua_state_check = &core_alua_state_check_nop;
-               pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
-                       " emulation\n", dev->transport->name);
-               return 0;
-       }
-       /*
-        * If SPC-3 or above is reported by real or emulated struct se_device,
-        * use emulated ALUA.
-        */
-       if (dev->transport->get_device_rev(dev) >= SCSI_3) {
-               pr_debug("%s: Enabling ALUA Emulation for SPC-3"
-                       " device\n", dev->transport->name);
+       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+           !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
+               struct t10_alua_lu_gp_member *lu_gp_mem;
+
                /*
                 * Associate this struct se_device with the default ALUA
                 * LUN Group.
@@ -2088,8 +2037,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
                if (IS_ERR(lu_gp_mem))
                        return PTR_ERR(lu_gp_mem);
 
-               alua->alua_type = SPC3_ALUA_EMULATED;
-               alua->alua_state_check = &core_alua_state_check;
                spin_lock(&lu_gp_mem->lu_gp_mem_lock);
                __core_alua_attach_lu_gp_mem(lu_gp_mem,
                                default_lu_gp);
@@ -2098,11 +2045,6 @@ int core_setup_alua(struct se_device *dev, int force_pt)
                pr_debug("%s: Adding to default ALUA LU Group:"
                        " core/alua/lu_gps/default_lu_gp\n",
                        dev->transport->name);
-       } else {
-               alua->alua_type = SPC2_ALUA_DISABLED;
-               alua->alua_state_check = &core_alua_state_check_nop;
-               pr_debug("%s: Disabling ALUA Emulation for SPC-2"
-                       " device\n", dev->transport->name);
        }
 
        return 0;
index f920c17..e539c3e 100644 (file)
@@ -72,8 +72,8 @@ extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
 extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
-extern int target_emulate_report_target_port_groups(struct se_cmd *);
-extern int target_emulate_set_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *);
+extern sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *);
 extern int core_alua_check_nonop_delay(struct se_cmd *);
 extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
                                struct se_device *, struct se_port *,
@@ -91,7 +91,7 @@ extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
                                        struct t10_alua_lu_gp *);
 extern void core_alua_drop_lu_gp_dev(struct se_device *);
 extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
-                       struct se_subsystem_dev *, const char *, int);
+                       struct se_device *, const char *, int);
 extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
 extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
                                        struct se_port *);
@@ -131,6 +131,7 @@ extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
                                        char *);
 extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
                                        const char *, size_t);
-extern int core_setup_alua(struct se_device *, int);
+extern int core_setup_alua(struct se_device *);
+extern sense_reason_t target_alua_state_check(struct se_cmd *cmd);
 
 #endif /* TARGET_CORE_ALUA_H */
index c123327..4efb61b 100644 (file)
@@ -3,8 +3,7 @@
  *
  * This file contains ConfigFS logic for the Generic Target Engine project.
  *
- * Copyright (c) 2008-2011 Rising Tide Systems
- * Copyright (c) 2008-2011 Linux-iSCSI.org
+ * (c) Copyright 2008-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -565,21 +564,8 @@ static ssize_t target_core_dev_show_attr_##_name(                  \
        struct se_dev_attrib *da,                                       \
        char *page)                                                     \
 {                                                                      \
-       struct se_device *dev;                                          \
-       struct se_subsystem_dev *se_dev = da->da_sub_dev;                       \
-       ssize_t rb;                                                     \
-                                                                       \
-       spin_lock(&se_dev->se_dev_lock);                                \
-       dev = se_dev->se_dev_ptr;                                       \
-       if (!dev) {                                                     \
-               spin_unlock(&se_dev->se_dev_lock);                      \
-               return -ENODEV;                                         \
-       }                                                               \
-       rb = snprintf(page, PAGE_SIZE, "%u\n",                          \
-               (u32)dev->se_sub_dev->se_dev_attrib._name);             \
-       spin_unlock(&se_dev->se_dev_lock);                              \
-                                                                       \
-       return rb;                                                      \
+       return snprintf(page, PAGE_SIZE, "%u\n",                        \
+               (u32)da->da_dev->dev_attrib._name);                     \
 }
 
 #define DEF_DEV_ATTRIB_STORE(_name)                                    \
@@ -588,26 +574,16 @@ static ssize_t target_core_dev_store_attr_##_name(                        \
        const char *page,                                               \
        size_t count)                                                   \
 {                                                                      \
-       struct se_device *dev;                                          \
-       struct se_subsystem_dev *se_dev = da->da_sub_dev;                       \
        unsigned long val;                                              \
        int ret;                                                        \
                                                                        \
-       spin_lock(&se_dev->se_dev_lock);                                \
-       dev = se_dev->se_dev_ptr;                                       \
-       if (!dev) {                                                     \
-               spin_unlock(&se_dev->se_dev_lock);                      \
-               return -ENODEV;                                         \
-       }                                                               \
        ret = strict_strtoul(page, 0, &val);                            \
        if (ret < 0) {                                                  \
-               spin_unlock(&se_dev->se_dev_lock);                      \
                pr_err("strict_strtoul() failed with"           \
                        " ret: %d\n", ret);                             \
                return -EINVAL;                                         \
        }                                                               \
-       ret = se_dev_set_##_name(dev, (u32)val);                        \
-       spin_unlock(&se_dev->se_dev_lock);                              \
+       ret = se_dev_set_##_name(da->da_dev, (u32)val);                 \
                                                                        \
        return (!ret) ? count : -EINVAL;                                \
 }
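
With se_dev_attrib now carrying a direct pointer to its live se_device (da->da_dev), the macro-generated show/store pairs shrink to one-liners; the se_dev_lock/NULL-pointer dance existed only because the old se_subsystem_dev could exist before its se_device did. A userspace sketch of the code-generation idiom itself, with illustrative field names:

#include <stdio.h>

struct dev_attrib {
	unsigned int block_size;
	unsigned int queue_depth;
};

/* One macro expansion per attribute generates a uniform accessor,
 * the same trick DEF_DEV_ATTRIB_SHOW/STORE play with snprintf(). */
#define DEF_SHOW(_name)						\
static int show_##_name(struct dev_attrib *da, char *buf,	\
			size_t len)				\
{								\
	return snprintf(buf, len, "%u\n", da->_name);		\
}

DEF_SHOW(block_size)
DEF_SHOW(queue_depth)

int main(void)
{
	struct dev_attrib da = { .block_size = 512, .queue_depth = 32 };
	char buf[16];

	show_block_size(&da, buf, sizeof(buf));
	fputs(buf, stdout);
	show_queue_depth(&da, buf, sizeof(buf));
	fputs(buf, stdout);
	return 0;
}
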
@@ -699,6 +675,9 @@ SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
 DEF_DEV_ATTRIB(unmap_granularity_alignment);
 SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
 
+DEF_DEV_ATTRIB(max_write_same_len);
+SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
+
 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 
 static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
@@ -724,6 +703,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
        &target_core_dev_attrib_max_unmap_block_desc_count.attr,
        &target_core_dev_attrib_unmap_granularity.attr,
        &target_core_dev_attrib_unmap_granularity_alignment.attr,
+       &target_core_dev_attrib_max_write_same_len.attr,
        NULL,
 };
 
@@ -764,13 +744,6 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
        struct t10_wwn *t10_wwn,
        char *page)
 {
-       struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-       struct se_device *dev;
-
-       dev = se_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
        return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
                &t10_wwn->unit_serial[0]);
 }
@@ -780,8 +753,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
-       struct se_device *dev;
+       struct se_device *dev = t10_wwn->t10_dev;
        unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
 
        /*
@@ -794,7 +766,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
         * it is doing 'the right thing' wrt a world wide unique
         * VPD Unit Serial Number that OS dependent multipath can depend on.
         */
-       if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
+       if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
                pr_err("Underlying SCSI device firmware provided VPD"
                        " Unit Serial, ignoring request\n");
                return -EOPNOTSUPP;
@@ -811,15 +783,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
         * (underneath the initiator side OS dependent multipath code)
         * could cause negative effects.
         */
-       dev = su_dev->se_dev_ptr;
-       if (dev) {
-               if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-                       pr_err("Unable to set VPD Unit Serial while"
-                               " active %d $FABRIC_MOD exports exist\n",
-                               atomic_read(&dev->dev_export_obj.obj_access_count));
-                       return -EINVAL;
-               }
+       if (dev->export_count) {
+               pr_err("Unable to set VPD Unit Serial while"
+                       " active %d $FABRIC_MOD exports exist\n",
+                       dev->export_count);
+               return -EINVAL;
        }
+
        /*
         * This currently assumes ASCII encoding for emulated VPD Unit Serial.
         *
@@ -828,12 +798,12 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
         */
        memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
        snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
-       snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
+       snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
                        "%s", strstrip(buf));
-       su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
+       dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
 
        pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
-                       " %s\n", su_dev->t10_wwn.unit_serial);
+                       " %s\n", dev->t10_wwn.unit_serial);
 
        return count;
 }
@@ -847,16 +817,10 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
        struct t10_wwn *t10_wwn,
        char *page)
 {
-       struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
-       struct se_device *dev;
        struct t10_vpd *vpd;
        unsigned char buf[VPD_TMP_BUF_SIZE];
        ssize_t len = 0;
 
-       dev = se_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
        memset(buf, 0, VPD_TMP_BUF_SIZE);
 
        spin_lock(&t10_wwn->t10_vpd_lock);
@@ -894,16 +858,10 @@ static ssize_t target_core_dev_wwn_show_attr_##_name(                     \
        struct t10_wwn *t10_wwn,                                        \
        char *page)                                                     \
 {                                                                      \
-       struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;         \
-       struct se_device *dev;                                          \
        struct t10_vpd *vpd;                                                    \
        unsigned char buf[VPD_TMP_BUF_SIZE];                            \
        ssize_t len = 0;                                                \
                                                                        \
-       dev = se_dev->se_dev_ptr;                                       \
-       if (!dev)                                                       \
-               return -ENODEV;                                         \
-                                                                       \
        spin_lock(&t10_wwn->t10_vpd_lock);                              \
        list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {    \
                if (vpd->association != _assoc)                         \
@@ -1003,7 +961,7 @@ static struct config_item_type target_core_dev_wwn_cit = {
 
 /*  Start functions for struct config_item_type target_core_dev_pr_cit */
 
-CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
+CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
 #define SE_DEV_PR_ATTR(_name, _mode)                                   \
 static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
        __CONFIGFS_EATTR(_name, _mode,                                  \
@@ -1015,13 +973,8 @@ static struct target_core_dev_pr_attribute target_core_dev_pr_##_name =   \
        __CONFIGFS_EATTR_RO(_name,                                      \
        target_core_dev_pr_show_attr_##_name);
 
-/*
- * res_holder
- */
-static ssize_t target_core_dev_pr_show_spc3_res(
-       struct se_device *dev,
-       char *page,
-       ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
+               char *page)
 {
        struct se_node_acl *se_nacl;
        struct t10_pr_registration *pr_reg;
@@ -1030,134 +983,82 @@ static ssize_t target_core_dev_pr_show_spc3_res(
 
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
 
-       spin_lock(&dev->dev_reservation_lock);
        pr_reg = dev->dev_pr_res_holder;
-       if (!pr_reg) {
-               *len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
-               spin_unlock(&dev->dev_reservation_lock);
-               return *len;
-       }
+       if (!pr_reg)
+               return sprintf(page, "No SPC-3 Reservation holder\n");
+
        se_nacl = pr_reg->pr_reg_nacl;
        prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
                                PR_REG_ISID_ID_LEN);
 
-       *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
+       return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
                se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
                se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
-       spin_unlock(&dev->dev_reservation_lock);
-
-       return *len;
 }
 
-static ssize_t target_core_dev_pr_show_spc2_res(
-       struct se_device *dev,
-       char *page,
-       ssize_t *len)
+static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
+               char *page)
 {
        struct se_node_acl *se_nacl;
+       ssize_t len;
 
-       spin_lock(&dev->dev_reservation_lock);
        se_nacl = dev->dev_reserved_node_acl;
-       if (!se_nacl) {
-               *len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
-               spin_unlock(&dev->dev_reservation_lock);
-               return *len;
+       if (se_nacl) {
+               len = sprintf(page,
+                             "SPC-2 Reservation: %s Initiator: %s\n",
+                             se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+                             se_nacl->initiatorname);
+       } else {
+               len = sprintf(page, "No SPC-2 Reservation holder\n");
        }
-       *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
-               se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
-               se_nacl->initiatorname);
-       spin_unlock(&dev->dev_reservation_lock);
-
-       return *len;
+       return len;
 }
 
-static ssize_t target_core_dev_pr_show_attr_res_holder(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
+               char *page)
 {
-       ssize_t len = 0;
+       int ret;
 
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       switch (su_dev->t10_pr.res_type) {
-       case SPC3_PERSISTENT_RESERVATIONS:
-               target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
-                               page, &len);
-               break;
-       case SPC2_RESERVATIONS:
-               target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
-                               page, &len);
-               break;
-       case SPC_PASSTHROUGH:
-               len += sprintf(page+len, "Passthrough\n");
-               break;
-       default:
-               len += sprintf(page+len, "Unknown\n");
-               break;
-       }
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return sprintf(page, "Passthrough\n");
 
-       return len;
+       spin_lock(&dev->dev_reservation_lock);
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+               ret = target_core_dev_pr_show_spc2_res(dev, page);
+       else
+               ret = target_core_dev_pr_show_spc3_res(dev, page);
+       spin_unlock(&dev->dev_reservation_lock);
+       return ret;
 }
 
 SE_DEV_PR_ATTR_RO(res_holder);
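
The res_holder attribute no longer consults a stored res_type enum; the reservation style is derived at show time. Passthrough transports (TRANSPORT_PLUGIN_PHBA_PDEV) short-circuit before taking the lock, and the DRF_SPC2_RESERVATIONS flag selects the SPC-2 helper, with dev_reservation_lock now taken once in the caller rather than inside each helper. A minimal userspace sketch of that dispatch, with the kernel types reduced to stand-ins (this struct se_device is illustrative, not the real definition):

    #include <stdio.h>

    #define DRF_SPC2_RESERVATIONS       0x00000001
    #define TRANSPORT_PLUGIN_PHBA_PDEV  1

    struct se_device {
            int transport_type;                  /* from dev->transport */
            unsigned int dev_reservation_flags;
    };

    static int show_spc2_res(struct se_device *dev, char *page)
    {
            return sprintf(page, "SPC-2 style holder\n");
    }

    static int show_spc3_res(struct se_device *dev, char *page)
    {
            return sprintf(page, "SPC-3 style holder\n");
    }

    /* Mirrors the new res_holder dispatch: passthrough first, then flag. */
    static int show_res_holder(struct se_device *dev, char *page)
    {
            if (dev->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                    return sprintf(page, "Passthrough\n");
            /* the kernel holds dev_reservation_lock across the helper here */
            if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                    return show_spc2_res(dev, page);
            return show_spc3_res(dev, page);
    }

    int main(void)
    {
            char page[64];
            struct se_device d = { 0, DRF_SPC2_RESERVATIONS };

            show_res_holder(&d, page);
            fputs(page, stdout);        /* prints the SPC-2 branch */
            return 0;
    }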
 
-/*
- * res_pr_all_tgt_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       struct se_device *dev;
-       struct t10_pr_registration *pr_reg;
        ssize_t len = 0;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
-               return len;
-
        spin_lock(&dev->dev_reservation_lock);
-       pr_reg = dev->dev_pr_res_holder;
-       if (!pr_reg) {
+       if (!dev->dev_pr_res_holder) {
                len = sprintf(page, "No SPC-3 Reservation holder\n");
-               spin_unlock(&dev->dev_reservation_lock);
-               return len;
-       }
-       /*
-        * See All Target Ports (ALL_TG_PT) bit in spcr17, section 6.14.3
-        * Basic PERSISTENT RESERVER OUT parameter list, page 290
-        */
-       if (pr_reg->pr_reg_all_tg_pt)
+       } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
                len = sprintf(page, "SPC-3 Reservation: All Target"
                        " Ports registration\n");
-       else
+       } else {
                len = sprintf(page, "SPC-3 Reservation: Single"
                        " Target Port registration\n");
-       spin_unlock(&dev->dev_reservation_lock);
+       }
 
+       spin_unlock(&dev->dev_reservation_lock);
        return len;
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
 
-/*
- * res_pr_generation
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
-               return 0;
-
-       return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
+       return sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation);
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1166,10 +1067,8 @@ SE_DEV_PR_ATTR_RO(res_pr_generation);
  * res_pr_holder_tg_port
  */
 static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       struct se_device *dev;
        struct se_node_acl *se_nacl;
        struct se_lun *lun;
        struct se_portal_group *se_tpg;
@@ -1177,20 +1076,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
        struct target_core_fabric_ops *tfo;
        ssize_t len = 0;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
-               return len;
-
        spin_lock(&dev->dev_reservation_lock);
        pr_reg = dev->dev_pr_res_holder;
        if (!pr_reg) {
                len = sprintf(page, "No SPC-3 Reservation holder\n");
-               spin_unlock(&dev->dev_reservation_lock);
-               return len;
+               goto out_unlock;
        }
+
        se_nacl = pr_reg->pr_reg_nacl;
        se_tpg = se_nacl->se_tpg;
        lun = pr_reg->pr_reg_tg_pt_lun;
@@ -1204,19 +1096,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
                " %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
                tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
                tfo->get_fabric_name(), lun->unpacked_lun);
-       spin_unlock(&dev->dev_reservation_lock);
 
+out_unlock:
+       spin_unlock(&dev->dev_reservation_lock);
        return len;
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
 
-/*
- * res_pr_registered_i_pts
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
        struct target_core_fabric_ops *tfo;
        struct t10_pr_registration *pr_reg;
@@ -1225,16 +1114,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
        ssize_t len = 0;
        int reg_count = 0, prf_isid;
 
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
-               return len;
-
        len += sprintf(page+len, "SPC-3 PR Registrations:\n");
 
-       spin_lock(&su_dev->t10_pr.registration_lock);
-       list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+       spin_lock(&dev->t10_pr.registration_lock);
+       list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
                        pr_reg_list) {
 
                memset(buf, 0, 384);
@@ -1254,7 +1137,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
                len += sprintf(page+len, "%s", buf);
                reg_count++;
        }
-       spin_unlock(&su_dev->t10_pr.registration_lock);
+       spin_unlock(&dev->t10_pr.registration_lock);
 
        if (!reg_count)
                len += sprintf(page+len, "None\n");
@@ -1264,88 +1147,48 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
 
 SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
 
-/*
- * res_pr_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_pr_type(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       struct se_device *dev;
        struct t10_pr_registration *pr_reg;
        ssize_t len = 0;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
-               return len;
-
        spin_lock(&dev->dev_reservation_lock);
        pr_reg = dev->dev_pr_res_holder;
-       if (!pr_reg) {
+       if (pr_reg) {
+               len = sprintf(page, "SPC-3 Reservation Type: %s\n",
+                       core_scsi3_pr_dump_type(pr_reg->pr_res_type));
+       } else {
                len = sprintf(page, "No SPC-3 Reservation holder\n");
-               spin_unlock(&dev->dev_reservation_lock);
-               return len;
        }
-       len = sprintf(page, "SPC-3 Reservation Type: %s\n",
-               core_scsi3_pr_dump_type(pr_reg->pr_res_type));
-       spin_unlock(&dev->dev_reservation_lock);
 
+       spin_unlock(&dev->dev_reservation_lock);
        return len;
 }
 
 SE_DEV_PR_ATTR_RO(res_pr_type);
 
-/*
- * res_type
- */
 static ssize_t target_core_dev_pr_show_attr_res_type(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       ssize_t len = 0;
-
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       switch (su_dev->t10_pr.res_type) {
-       case SPC3_PERSISTENT_RESERVATIONS:
-               len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
-               break;
-       case SPC2_RESERVATIONS:
-               len = sprintf(page, "SPC2_RESERVATIONS\n");
-               break;
-       case SPC_PASSTHROUGH:
-               len = sprintf(page, "SPC_PASSTHROUGH\n");
-               break;
-       default:
-               len = sprintf(page, "UNKNOWN\n");
-               break;
-       }
-
-       return len;
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return sprintf(page, "SPC_PASSTHROUGH\n");
+       else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+               return sprintf(page, "SPC2_RESERVATIONS\n");
+       else
+               return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
 }
 
 SE_DEV_PR_ATTR_RO(res_type);
 
-/*
- * res_aptpl_active
- */
-
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
 
        return sprintf(page, "APTPL Bit Status: %s\n",
-               (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
+               (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
 }
 
 SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1354,13 +1197,9 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
  * res_aptpl_metadata
  */
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
-       struct se_subsystem_dev *su_dev,
-       char *page)
+               struct se_device *dev, char *page)
 {
-       if (!su_dev->se_dev_ptr)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
 
        return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1392,11 +1231,10 @@ static match_table_t tokens = {
 };
 
 static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
-       struct se_subsystem_dev *su_dev,
+       struct se_device *dev,
        const char *page,
        size_t count)
 {
-       struct se_device *dev;
        unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
        unsigned char *t_fabric = NULL, *t_port = NULL;
        char *orig, *ptr, *arg_p, *opts;
@@ -1408,14 +1246,12 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u16 port_rpti = 0, tpgt = 0;
        u8 type = 0, scope;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return 0;
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
 
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_debug("Unable to process APTPL metadata while"
                        " active fabric exports exist\n");
                return -EINVAL;
@@ -1558,7 +1394,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                goto out;
        }
 
-       ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
+       ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
                        i_port, isid, mapped_lun, t_port, tpgt, target_lun,
                        res_holder, all_tg_pt, type);
 out:
@@ -1573,7 +1409,7 @@ out:
 
 SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
 
-CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
+CONFIGFS_EATTR_OPS(target_core_dev_pr, se_device, dev_pr_group);
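
CONFIGFS_EATTR_STRUCT/CONFIGFS_EATTR_OPS come from target_core_configfs.h on top of the configfs_macros.h helpers; changing their struct argument from se_subsystem_dev to se_device retypes every pr show/store callback in one stroke. Roughly, the STRUCT macro expands to an attribute type of the following shape (a simplified paraphrase for orientation, not the exact kernel expansion):

    #include <sys/types.h>

    /* Stand-in so the sketch is self-contained; the real definition
     * lives in include/linux/configfs.h. */
    struct configfs_attribute { const char *ca_name; };
    struct se_device;

    /* Approximate expansion of
     * CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device):
     * one attribute type whose callbacks take the container directly. */
    struct target_core_dev_pr_attribute {
            struct configfs_attribute attr;
            ssize_t (*show)(struct se_device *, char *);
            ssize_t (*store)(struct se_device *, const char *, size_t);
    };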
 
 static struct configfs_attribute *target_core_dev_pr_attrs[] = {
        &target_core_dev_pr_res_holder.attr,
@@ -1605,18 +1441,14 @@ static struct config_item_type target_core_dev_pr_cit = {
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = p;
-       struct se_hba *hba = se_dev->se_dev_hba;
-       struct se_subsystem_api *t = hba->transport;
+       struct se_device *dev = p;
+       struct se_subsystem_api *t = dev->transport;
        int bl = 0;
        ssize_t read_bytes = 0;
 
-       if (!se_dev->se_dev_ptr)
-               return -ENODEV;
-
-       transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
+       transport_dump_dev_state(dev, page, &bl);
        read_bytes += bl;
-       read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
+       read_bytes += t->show_configfs_dev_params(dev, page+read_bytes);
        return read_bytes;
 }
 
@@ -1633,17 +1465,10 @@ static ssize_t target_core_store_dev_control(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = p;
-       struct se_hba *hba = se_dev->se_dev_hba;
-       struct se_subsystem_api *t = hba->transport;
+       struct se_device *dev = p;
+       struct se_subsystem_api *t = dev->transport;
 
-       if (!se_dev->se_dev_su_ptr) {
-               pr_err("Unable to locate struct se_subsystem_dev>se"
-                               "_dev_su_ptr\n");
-               return -EINVAL;
-       }
-
-       return t->set_configfs_dev_params(hba, se_dev, page, count);
+       return t->set_configfs_dev_params(dev, page, count);
 }
 
 static struct target_core_configfs_attribute target_core_attr_dev_control = {
@@ -1656,12 +1481,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
 
 static ssize_t target_core_show_dev_alias(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = p;
+       struct se_device *dev = p;
 
-       if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
+       if (!(dev->dev_flags & DF_USING_ALIAS))
                return 0;
 
-       return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
+       return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
 }
 
 static ssize_t target_core_store_dev_alias(
@@ -1669,8 +1494,8 @@ static ssize_t target_core_store_dev_alias(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = p;
-       struct se_hba *hba = se_dev->se_dev_hba;
+       struct se_device *dev = p;
+       struct se_hba *hba = dev->se_hba;
        ssize_t read_bytes;
 
        if (count > (SE_DEV_ALIAS_LEN-1)) {
@@ -1680,19 +1505,18 @@ static ssize_t target_core_store_dev_alias(
                return -EINVAL;
        }
 
-       read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
-                       "%s", page);
+       read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
        if (!read_bytes)
                return -EINVAL;
-       if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
-               se_dev->se_dev_alias[read_bytes - 1] = '\0';
+       if (dev->dev_alias[read_bytes - 1] == '\n')
+               dev->dev_alias[read_bytes - 1] = '\0';
 
-       se_dev->su_dev_flags |= SDF_USING_ALIAS;
+       dev->dev_flags |= DF_USING_ALIAS;
 
        pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
                config_item_name(&hba->hba_group.cg_item),
-               config_item_name(&se_dev->se_dev_group.cg_item),
-               se_dev->se_dev_alias);
+               config_item_name(&dev->dev_group.cg_item),
+               dev->dev_alias);
 
        return read_bytes;
 }
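
The dev_alias and dev_udev_path stores share one pattern: a bounded snprintf into the fixed-size field, rejection of an empty write, stripping of the trailing newline that echo appends, then setting the DF_USING_* flag. A self-contained sketch of the string-handling part (flagging and locking omitted; errno.h's EINVAL stands in for the kernel's return code):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Sketch of the shared store pattern: bounded copy, then strip the
     * trailing newline that `echo value > attr` leaves behind. */
    static ssize_t store_string(char *dst, size_t dst_len,
                                const char *page, size_t count)
    {
            ssize_t n;

            if (count > dst_len - 1)
                    return -EINVAL;          /* too large for the field */

            n = snprintf(dst, dst_len, "%s", page);
            if (!n)
                    return -EINVAL;          /* empty write */
            if (dst[n - 1] == '\n')
                    dst[n - 1] = '\0';
            return n;
    }

    int main(void)
    {
            char alias[16];

            if (store_string(alias, sizeof(alias), "backstore0\n", 11) > 0)
                    printf("alias: '%s'\n", alias);   /* newline stripped */
            return 0;
    }
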
@@ -1707,12 +1531,12 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
 
 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = p;
+       struct se_device *dev = p;
 
-       if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
+       if (!(dev->dev_flags & DF_USING_UDEV_PATH))
                return 0;
 
-       return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
+       return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
 }
 
 static ssize_t target_core_store_dev_udev_path(
@@ -1720,8 +1544,8 @@ static ssize_t target_core_store_dev_udev_path(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = p;
-       struct se_hba *hba = se_dev->se_dev_hba;
+       struct se_device *dev = p;
+       struct se_hba *hba = dev->se_hba;
        ssize_t read_bytes;
 
        if (count > (SE_UDEV_PATH_LEN-1)) {
@@ -1731,19 +1555,19 @@ static ssize_t target_core_store_dev_udev_path(
                return -EINVAL;
        }
 
-       read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
+       read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
                        "%s", page);
        if (!read_bytes)
                return -EINVAL;
-       if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
-               se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+       if (dev->udev_path[read_bytes - 1] == '\n')
+               dev->udev_path[read_bytes - 1] = '\0';
 
-       se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
+       dev->dev_flags |= DF_USING_UDEV_PATH;
 
        pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
                config_item_name(&hba->hba_group.cg_item),
-               config_item_name(&se_dev->se_dev_group.cg_item),
-               se_dev->se_dev_udev_path);
+               config_item_name(&dev->dev_group.cg_item),
+               dev->udev_path);
 
        return read_bytes;
 }
@@ -1761,11 +1585,9 @@ static ssize_t target_core_store_dev_enable(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = p;
-       struct se_device *dev;
-       struct se_hba *hba = se_dev->se_dev_hba;
-       struct se_subsystem_api *t = hba->transport;
+       struct se_device *dev = p;
        char *ptr;
+       int ret;
 
        ptr = strstr(page, "1");
        if (!ptr) {
@@ -1773,25 +1595,10 @@ static ssize_t target_core_store_dev_enable(
                                " is \"1\"\n");
                return -EINVAL;
        }
-       if (se_dev->se_dev_ptr) {
-               pr_err("se_dev->se_dev_ptr already set for storage"
-                               " object\n");
-               return -EEXIST;
-       }
-
-       if (t->check_configfs_dev_params(hba, se_dev) < 0)
-               return -EINVAL;
-
-       dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
-       if (IS_ERR(dev))
-               return PTR_ERR(dev);
-       else if (!dev)
-               return -EINVAL;
-
-       se_dev->se_dev_ptr = dev;
-       pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
-               " %p\n", se_dev->se_dev_ptr);
 
+       ret = target_configure_device(dev);
+       if (ret)
+               return ret;
        return count;
 }
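
Enabling a device now funnels through target_configure_device() instead of the old check_configfs_dev_params()/create_virtdevice()/se_dev_ptr sequence, so the store shrinks to input validation plus one call. A sketch of the resulting shape; target_configure_device() here is a stand-in that only models rejection of a second enable, not the real configuration work:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct se_device { int configured; };

    /* Stand-in for target_configure_device(). */
    static int target_configure_device(struct se_device *dev)
    {
            if (dev->configured)
                    return -EEXIST;
            dev->configured = 1;
            return 0;
    }

    static ssize_t store_dev_enable(struct se_device *dev,
                                    const char *page, size_t count)
    {
            int ret;

            if (!strstr(page, "1"))     /* only "1" is a valid write */
                    return -EINVAL;

            ret = target_configure_device(dev);
            if (ret)
                    return ret;
            return (ssize_t)count;      /* full write consumed on success */
    }

    int main(void)
    {
            struct se_device d = { 0 };

            printf("first:  %zd\n", store_dev_enable(&d, "1\n", 2));
            printf("second: %zd\n", store_dev_enable(&d, "1\n", 2));
            return 0;
    }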
 
@@ -1805,26 +1612,15 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
 
 static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
 {
-       struct se_device *dev;
-       struct se_subsystem_dev *su_dev = p;
+       struct se_device *dev = p;
        struct config_item *lu_ci;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem;
        ssize_t len = 0;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
-
-       if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
-               return len;
-
        lu_gp_mem = dev->dev_alua_lu_gp_mem;
-       if (!lu_gp_mem) {
-               pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
-                               " pointer\n");
-               return -EINVAL;
-       }
+       if (!lu_gp_mem)
+               return 0;
 
        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
        lu_gp = lu_gp_mem->lu_gp;
@@ -1843,24 +1639,17 @@ static ssize_t target_core_store_alua_lu_gp(
        const char *page,
        size_t count)
 {
-       struct se_device *dev;
-       struct se_subsystem_dev *su_dev = p;
-       struct se_hba *hba = su_dev->se_dev_hba;
+       struct se_device *dev = p;
+       struct se_hba *hba = dev->se_hba;
        struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
        struct t10_alua_lu_gp_member *lu_gp_mem;
        unsigned char buf[LU_GROUP_NAME_BUF];
        int move = 0;
 
-       dev = su_dev->se_dev_ptr;
-       if (!dev)
-               return -ENODEV;
+       lu_gp_mem = dev->dev_alua_lu_gp_mem;
+       if (!lu_gp_mem)
+               return 0;
 
-       if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
-               pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
-                       config_item_name(&hba->hba_group.cg_item),
-                       config_item_name(&su_dev->se_dev_group.cg_item));
-               return -EINVAL;
-       }
        if (count > LU_GROUP_NAME_BUF) {
                pr_err("ALUA LU Group Alias too large!\n");
                return -EINVAL;
@@ -1881,14 +1670,6 @@ static ssize_t target_core_store_alua_lu_gp(
                if (!lu_gp_new)
                        return -ENODEV;
        }
-       lu_gp_mem = dev->dev_alua_lu_gp_mem;
-       if (!lu_gp_mem) {
-               if (lu_gp_new)
-                       core_alua_put_lu_gp_from_name(lu_gp_new);
-               pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
-                               " pointer\n");
-               return -EINVAL;
-       }
 
        spin_lock(&lu_gp_mem->lu_gp_mem_lock);
        lu_gp = lu_gp_mem->lu_gp;
@@ -1902,7 +1683,7 @@ static ssize_t target_core_store_alua_lu_gp(
                                " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
                                " %hu\n",
                                config_item_name(&hba->hba_group.cg_item),
-                               config_item_name(&su_dev->se_dev_group.cg_item),
+                               config_item_name(&dev->dev_group.cg_item),
                                config_item_name(&lu_gp->lu_gp_group.cg_item),
                                lu_gp->lu_gp_id);
 
@@ -1927,7 +1708,7 @@ static ssize_t target_core_store_alua_lu_gp(
                " core/alua/lu_gps/%s, ID: %hu\n",
                (move) ? "Moving" : "Adding",
                config_item_name(&hba->hba_group.cg_item),
-               config_item_name(&su_dev->se_dev_group.cg_item),
+               config_item_name(&dev->dev_group.cg_item),
                config_item_name(&lu_gp_new->lu_gp_group.cg_item),
                lu_gp_new->lu_gp_id);
 
@@ -1955,69 +1736,44 @@ static struct configfs_attribute *lio_core_dev_attrs[] = {
 
 static void target_core_dev_release(struct config_item *item)
 {
-       struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
-                               struct se_subsystem_dev, se_dev_group);
-       struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
-       struct se_subsystem_api *t = hba->transport;
-       struct config_group *dev_cg = &se_dev->se_dev_group;
+       struct config_group *dev_cg = to_config_group(item);
+       struct se_device *dev =
+               container_of(dev_cg, struct se_device, dev_group);
 
        kfree(dev_cg->default_groups);
-       /*
-        * This pointer will set when the storage is enabled with:
-        *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
-        */
-       if (se_dev->se_dev_ptr) {
-               pr_debug("Target_Core_ConfigFS: Calling se_free_"
-                       "virtual_device() for se_dev_ptr: %p\n",
-                       se_dev->se_dev_ptr);
-
-               se_free_virtual_device(se_dev->se_dev_ptr, hba);
-       } else {
-               /*
-                * Release struct se_subsystem_dev->se_dev_su_ptr..
-                */
-               pr_debug("Target_Core_ConfigFS: Calling t->free_"
-                       "device() for se_dev_su_ptr: %p\n",
-                       se_dev->se_dev_su_ptr);
-
-               t->free_device(se_dev->se_dev_su_ptr);
-       }
-
-       pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
-                       "_dev_t: %p\n", se_dev);
-       kfree(se_dev);
+       target_free_device(dev);
 }
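
Release now recovers the se_device straight from the config_item: to_config_group() yields the embedded config_group, and container_of() walks back to the owning se_device, replacing the old se_subsystem_dev bookkeeping. The idiom in isolation, with a userspace container_of (the kernel's version adds type checking):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct config_group { int dummy; };

    struct se_device {
            int id;
            struct config_group dev_group;   /* embedded, as in the kernel */
    };

    int main(void)
    {
            struct se_device dev = { .id = 7 };
            struct config_group *cg = &dev.dev_group;

            /* Same recovery target_core_dev_release() now performs:
             * config_item -> config_group -> se_device. */
            struct se_device *back =
                    container_of(cg, struct se_device, dev_group);

            printf("id = %d\n", back->id);   /* prints 7 */
            return 0;
    }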
 
 static ssize_t target_core_dev_show(struct config_item *item,
                                     struct configfs_attribute *attr,
                                     char *page)
 {
-       struct se_subsystem_dev *se_dev = container_of(
-                       to_config_group(item), struct se_subsystem_dev,
-                       se_dev_group);
+       struct config_group *dev_cg = to_config_group(item);
+       struct se_device *dev =
+               container_of(dev_cg, struct se_device, dev_group);
        struct target_core_configfs_attribute *tc_attr = container_of(
                        attr, struct target_core_configfs_attribute, attr);
 
        if (!tc_attr->show)
                return -EINVAL;
 
-       return tc_attr->show(se_dev, page);
+       return tc_attr->show(dev, page);
 }
 
 static ssize_t target_core_dev_store(struct config_item *item,
                                      struct configfs_attribute *attr,
                                      const char *page, size_t count)
 {
-       struct se_subsystem_dev *se_dev = container_of(
-                       to_config_group(item), struct se_subsystem_dev,
-                       se_dev_group);
+       struct config_group *dev_cg = to_config_group(item);
+       struct se_device *dev =
+               container_of(dev_cg, struct se_device, dev_group);
        struct target_core_configfs_attribute *tc_attr = container_of(
                        attr, struct target_core_configfs_attribute, attr);
 
        if (!tc_attr->store)
                return -EINVAL;
 
-       return tc_attr->store(se_dev, page, count);
+       return tc_attr->store(dev, page, count);
 }
 
 static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2107,7 +1863,6 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
 {
        struct se_device *dev;
        struct se_hba *hba;
-       struct se_subsystem_dev *su_dev;
        struct t10_alua_lu_gp_member *lu_gp_mem;
        ssize_t len = 0, cur_len;
        unsigned char buf[LU_GROUP_NAME_BUF];
@@ -2117,12 +1872,11 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
        spin_lock(&lu_gp->lu_gp_lock);
        list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
                dev = lu_gp_mem->lu_gp_mem_dev;
-               su_dev = dev->se_sub_dev;
-               hba = su_dev->se_dev_hba;
+               hba = dev->se_hba;
 
                cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
                        config_item_name(&hba->hba_group.cg_item),
-                       config_item_name(&su_dev->se_dev_group.cg_item));
+                       config_item_name(&dev->dev_group.cg_item));
                cur_len++; /* Extra byte for NULL terminator */
 
                if ((cur_len + len) > PAGE_SIZE) {
@@ -2260,7 +2014,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
+       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
        unsigned long tmp;
        int new_state, ret;
 
@@ -2284,7 +2038,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
                return -EINVAL;
        }
 
-       ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
+       ret = core_alua_do_port_transition(tg_pt_gp, dev,
                                        NULL, NULL, new_state, 0);
        return (!ret) ? count : -EINVAL;
 }
@@ -2620,11 +2374,10 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
        struct t10_alua *alua = container_of(group, struct t10_alua,
                                        alua_tg_pt_gps_group);
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
        struct config_group *alua_tg_pt_gp_cg = NULL;
        struct config_item *alua_tg_pt_gp_ci = NULL;
 
-       tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
+       tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
        if (!tg_pt_gp)
                return NULL;
 
@@ -2721,10 +2474,10 @@ static struct config_group *target_core_make_subdev(
        const char *name)
 {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
-       struct se_subsystem_dev *se_dev;
        struct se_subsystem_api *t;
        struct config_item *hba_ci = &group->cg_item;
        struct se_hba *hba = item_to_hba(hba_ci);
+       struct se_device *dev;
        struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
        struct config_group *dev_stat_grp = NULL;
        int errno = -ENOMEM, ret;
@@ -2737,120 +2490,80 @@ static struct config_group *target_core_make_subdev(
         */
        t = hba->transport;
 
-       se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
-       if (!se_dev) {
-               pr_err("Unable to allocate memory for"
-                               " struct se_subsystem_dev\n");
-               goto unlock;
-       }
-       INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
-       spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
-       INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
-       INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
-       spin_lock_init(&se_dev->t10_pr.registration_lock);
-       spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
-       INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
-       spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
-       spin_lock_init(&se_dev->se_dev_lock);
-       se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
-       se_dev->t10_wwn.t10_sub_dev = se_dev;
-       se_dev->t10_alua.t10_sub_dev = se_dev;
-       se_dev->se_dev_attrib.da_sub_dev = se_dev;
-
-       se_dev->se_dev_hba = hba;
-       dev_cg = &se_dev->se_dev_group;
-
-       dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
+       dev = target_alloc_device(hba, name);
+       if (!dev)
+               goto out_unlock;
+
+       dev_cg = &dev->dev_group;
+
+       dev_cg->default_groups = kmalloc(sizeof(struct config_group *) * 6,
                        GFP_KERNEL);
        if (!dev_cg->default_groups)
-               goto out;
-       /*
-        * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
-        * for ->allocate_virtdevice()
-        *
-        * se_dev->se_dev_ptr will be set after ->create_virtdev()
-        * has been called successfully in the next level up in the
-        * configfs tree for device object's struct config_group.
-        */
-       se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
-       if (!se_dev->se_dev_su_ptr) {
-               pr_err("Unable to locate subsystem dependent pointer"
-                       " from allocate_virtdevice()\n");
-               goto out;
-       }
+               goto out_free_device;
 
-       config_group_init_type_name(&se_dev->se_dev_group, name,
-                       &target_core_dev_cit);
-       config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
+       config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+       config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
                        &target_core_dev_attrib_cit);
-       config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
+       config_group_init_type_name(&dev->dev_pr_group, "pr",
                        &target_core_dev_pr_cit);
-       config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
+       config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
                        &target_core_dev_wwn_cit);
-       config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
+       config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
                        "alua", &target_core_alua_tg_pt_gps_cit);
-       config_group_init_type_name(&se_dev->dev_stat_grps.stat_group,
+       config_group_init_type_name(&dev->dev_stat_grps.stat_group,
                        "statistics", &target_core_stat_cit);
 
-       dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
-       dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
-       dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
-       dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
-       dev_cg->default_groups[4] = &se_dev->dev_stat_grps.stat_group;
+       dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
+       dev_cg->default_groups[1] = &dev->dev_pr_group;
+       dev_cg->default_groups[2] = &dev->t10_wwn.t10_wwn_group;
+       dev_cg->default_groups[3] = &dev->t10_alua.alua_tg_pt_gps_group;
+       dev_cg->default_groups[4] = &dev->dev_stat_grps.stat_group;
        dev_cg->default_groups[5] = NULL;
        /*
         * Add core/$HBA/$DEV/alua/default_tg_pt_gp
         */
-       tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
+       tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
        if (!tg_pt_gp)
-               goto out;
+               goto out_free_dev_cg_default_groups;
+       dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
 
-       tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
-       tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
+       tg_pt_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!tg_pt_gp_cg->default_groups) {
                pr_err("Unable to allocate tg_pt_gp_cg->"
                                "default_groups\n");
-               goto out;
+               goto out_free_tg_pt_gp;
        }
 
        config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
                        "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
        tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
        tg_pt_gp_cg->default_groups[1] = NULL;
-       se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
        /*
         * Add core/$HBA/$DEV/statistics/ default groups
         */
-       dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
-       dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
+       dev_stat_grp = &dev->dev_stat_grps.stat_group;
+       dev_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 4,
                                GFP_KERNEL);
        if (!dev_stat_grp->default_groups) {
                pr_err("Unable to allocate dev_stat_grp->default_groups\n");
-               goto out;
+               goto out_free_tg_pt_gp_cg_default_groups;
        }
-       target_stat_setup_dev_default_groups(se_dev);
-
-       pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
-               " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
+       target_stat_setup_dev_default_groups(dev);
 
        mutex_unlock(&hba->hba_access_mutex);
-       return &se_dev->se_dev_group;
-out:
-       if (se_dev->t10_alua.default_tg_pt_gp) {
-               core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
-               se_dev->t10_alua.default_tg_pt_gp = NULL;
-       }
-       if (dev_stat_grp)
-               kfree(dev_stat_grp->default_groups);
-       if (tg_pt_gp_cg)
-               kfree(tg_pt_gp_cg->default_groups);
-       if (dev_cg)
-               kfree(dev_cg->default_groups);
-       if (se_dev->se_dev_su_ptr)
-               t->free_device(se_dev->se_dev_su_ptr);
-       kfree(se_dev);
-unlock:
+       return dev_cg;
+
+out_free_tg_pt_gp_cg_default_groups:
+       kfree(tg_pt_gp_cg->default_groups);
+out_free_tg_pt_gp:
+       core_alua_free_tg_pt_gp(tg_pt_gp);
+out_free_dev_cg_default_groups:
+       kfree(dev_cg->default_groups);
+out_free_device:
+       target_free_device(dev);
+out_unlock:
        mutex_unlock(&hba->hba_access_mutex);
        return ERR_PTR(errno);
 }
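
The error path swaps the old catch-all out: label, which had to re-test every pointer before freeing it, for ordered labels that undo exactly what has succeeded, jumped to in reverse allocation order. A compact model of that unwind style, assuming three hypothetical allocation stages:

    #include <stdlib.h>

    /* Sketch of the reverse-order unwind now used by
     * target_core_make_subdev(): each label frees only what was
     * already allocated when the jump happens. */
    static int make_subdev(void)
    {
            void *dev, *groups, *tg_pt_gp;

            dev = malloc(32);
            if (!dev)
                    goto out;
            groups = malloc(32);
            if (!groups)
                    goto out_free_device;
            tg_pt_gp = malloc(32);
            if (!tg_pt_gp)
                    goto out_free_groups;

            /* success; demo cleanup only (the kernel hands ownership
             * of these objects to configfs instead) */
            free(tg_pt_gp);
            free(groups);
            free(dev);
            return 0;

    out_free_groups:
            free(groups);
    out_free_device:
            free(dev);
    out:
            return -1;
    }

    int main(void)
    {
            return make_subdev() ? 1 : 0;
    }
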
@@ -2859,18 +2572,19 @@ static void target_core_drop_subdev(
        struct config_group *group,
        struct config_item *item)
 {
-       struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
-                               struct se_subsystem_dev, se_dev_group);
+       struct config_group *dev_cg = to_config_group(item);
+       struct se_device *dev =
+               container_of(dev_cg, struct se_device, dev_group);
        struct se_hba *hba;
        struct config_item *df_item;
-       struct config_group *dev_cg, *tg_pt_gp_cg, *dev_stat_grp;
+       struct config_group *tg_pt_gp_cg, *dev_stat_grp;
        int i;
 
-       hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+       hba = item_to_hba(&dev->se_hba->hba_group.cg_item);
 
        mutex_lock(&hba->hba_access_mutex);
 
-       dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
+       dev_stat_grp = &dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
                dev_stat_grp->default_groups[i] = NULL;
@@ -2878,7 +2592,7 @@ static void target_core_drop_subdev(
        }
        kfree(dev_stat_grp->default_groups);
 
-       tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
+       tg_pt_gp_cg = &dev->t10_alua.alua_tg_pt_gps_group;
        for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
                df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
                tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2889,17 +2603,15 @@ static void target_core_drop_subdev(
         * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
         * directly from target_core_alua_tg_pt_gp_release().
         */
-       se_dev->t10_alua.default_tg_pt_gp = NULL;
+       dev->t10_alua.default_tg_pt_gp = NULL;
 
-       dev_cg = &se_dev->se_dev_group;
        for (i = 0; dev_cg->default_groups[i]; i++) {
                df_item = &dev_cg->default_groups[i]->cg_item;
                dev_cg->default_groups[i] = NULL;
                config_item_put(df_item);
        }
        /*
-        * The releasing of se_dev and associated se_dev->se_dev_ptr is done
-        * from target_core_dev_item_ops->release() ->target_core_dev_release().
+        * se_dev is released from target_core_dev_item_ops->release()
         */
        config_item_put(item);
        mutex_unlock(&hba->hba_access_mutex);
@@ -2962,13 +2674,10 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
                return -EINVAL;
        }
 
-       spin_lock(&hba->device_lock);
-       if (!list_empty(&hba->hba_dev_list)) {
+       if (hba->dev_count) {
                pr_err("Unable to set hba_mode with active devices\n");
-               spin_unlock(&hba->device_lock);
                return -EINVAL;
        }
-       spin_unlock(&hba->device_lock);
 
        ret = transport->pmode_enable_hba(hba, mode_flag);
        if (ret < 0)
@@ -3120,7 +2829,7 @@ static int __init target_core_init_configfs(void)
         * and ALUA Logical Unit Group and Target Port Group infrastructure.
         */
        target_cg = &subsys->su_group;
-       target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       target_cg->default_groups = kmalloc(sizeof(struct config_group) * 2,
                                GFP_KERNEL);
        if (!target_cg->default_groups) {
                pr_err("Unable to allocate target_cg->default_groups\n");
@@ -3136,7 +2845,7 @@ static int __init target_core_init_configfs(void)
         * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
         */
        hba_cg = &target_core_hbagroup;
-       hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       hba_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!hba_cg->default_groups) {
                pr_err("Unable to allocate hba_cg->default_groups\n");
@@ -3152,7 +2861,7 @@ static int __init target_core_init_configfs(void)
         * groups under /sys/kernel/config/target/core/alua/
         */
        alua_cg = &alua_group;
-       alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       alua_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                        GFP_KERNEL);
        if (!alua_cg->default_groups) {
                pr_err("Unable to allocate alua_cg->default_groups\n");
@@ -3174,7 +2883,7 @@ static int __init target_core_init_configfs(void)
        }
 
        lu_gp_cg = &alua_lu_gps_group;
-       lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       lu_gp_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                        GFP_KERNEL);
        if (!lu_gp_cg->default_groups) {
                pr_err("Unable to allocate lu_gp_cg->default_groups\n");
index 9abef9f..e269510 100644

@@ -4,10 +4,7 @@
  * This file contains the TCM Virtual Device and Disk Transport
  * agnostic related functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
-static void se_dev_start(struct se_device *dev);
-static void se_dev_stop(struct se_device *dev);
-
 static struct se_hba *lun0_hba;
-static struct se_subsystem_dev *lun0_su_dev;
 /* not static, needed by tpg.c */
 struct se_device *g_lun0_dev;
 
-int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
+sense_reason_t
+transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 {
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_device *dev;
        unsigned long flags;
 
-       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
-               se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-               se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               return -ENODEV;
-       }
+       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
+               return TCM_NON_EXISTENT_LUN;
 
        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -81,14 +72,12 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 
                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
-                       se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
-                       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
                        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
-                       return -EACCES;
+                       return TCM_WRITE_PROTECTED;
                }
 
                if (se_cmd->data_direction == DMA_TO_DEVICE)
@@ -113,38 +102,24 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (unpacked_lun != 0) {
-                       se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-                       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08x\n",
                                se_cmd->se_tfo->get_fabric_name(),
                                unpacked_lun);
-                       return -ENODEV;
+                       return TCM_NON_EXISTENT_LUN;
                }
                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-                   (se_cmd->data_direction != DMA_NONE)) {
-                       se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
-                       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       return -EACCES;
-               }
+                   (se_cmd->data_direction != DMA_NONE))
+                       return TCM_WRITE_PROTECTED;
 
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
-       /*
-        * Determine if the struct se_lun is online.
-        * FIXME: Check for LUN_RESET + UNIT Attention
-        */
-       if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
-               se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-               se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               return -ENODEV;
-       }
 
        /* Directly associate cmd with se_dev */
        se_cmd->se_dev = se_lun->lun_se_dev;
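
transport_lookup_cmd_lun() now returns a sense_reason_t (a TCM_* value) directly instead of stashing scsi_sense_reason and SCF_SCSI_CDB_EXCEPTION into the command and returning a negative errno; the caller converts the reason into sense data in one place. A sketch of the changed error returns (the enum here is a stand-in; the kernel's sense_reason_t carries many more codes):

    #include <stdio.h>

    /* Stand-in sense_reason_t; the kernel's lives in
     * include/target/target_core_base.h. */
    typedef enum {
            TCM_NO_SENSE = 0,
            TCM_NON_EXISTENT_LUN,
            TCM_WRITE_PROTECTED,
    } sense_reason_t;

    #define MAX_LUNS 256

    static sense_reason_t lookup_cmd_lun(unsigned int lun, int is_write,
                                         int read_only)
    {
            if (lun >= MAX_LUNS)
                    return TCM_NON_EXISTENT_LUN; /* was -ENODEV + flags */
            if (is_write && read_only)
                    return TCM_WRITE_PROTECTED;  /* was -EACCES + flags */
            return TCM_NO_SENSE;
    }

    int main(void)
    {
            printf("%d\n", lookup_cmd_lun(999, 0, 0)); /* NON_EXISTENT_LUN */
            printf("%d\n", lookup_cmd_lun(0, 1, 1));   /* WRITE_PROTECTED */
            return 0;
    }
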
@@ -175,11 +150,8 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        unsigned long flags;
 
-       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
-               se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
-               se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+       if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
                return -ENODEV;
-       }
 
        spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
        se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
@@ -199,15 +171,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                        " Access for 0x%08x\n",
                        se_cmd->se_tfo->get_fabric_name(),
                        unpacked_lun);
-               se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               return -ENODEV;
-       }
-       /*
-        * Determine if the struct se_lun is online.
-        * FIXME: Check for LUN_RESET + UNIT Attention
-        */
-       if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
-               se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
                return -ENODEV;
        }
 
@@ -565,7 +528,6 @@ static void core_export_port(
        struct se_port *port,
        struct se_lun *lun)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
 
        spin_lock(&dev->se_port_lock);
@@ -578,7 +540,8 @@ static void core_export_port(
        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);
 
-       if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+           !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
                        pr_err("Unable to allocate t10_alua_tg_pt"
@@ -587,7 +550,7 @@ static void core_export_port(
                }
                spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
-                       su_dev->t10_alua.default_tg_pt_gp);
+                       dev->t10_alua.default_tg_pt_gp);
                spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_debug("%s/%s: Adding to default ALUA Target Port"
                        " Group: alua/default_tg_pt_gp\n",
@@ -625,6 +588,7 @@ int core_dev_export(
        struct se_portal_group *tpg,
        struct se_lun *lun)
 {
+       struct se_hba *hba = dev->se_hba;
        struct se_port *port;
 
        port = core_alloc_port(dev);
@@ -632,9 +596,11 @@ int core_dev_export(
                return PTR_ERR(port);
 
        lun->lun_se_dev = dev;
-       se_dev_start(dev);
 
-       atomic_inc(&dev->dev_export_obj.obj_access_count);
+       spin_lock(&hba->device_lock);
+       dev->export_count++;
+       spin_unlock(&hba->device_lock);
+
        core_export_port(dev, tpg, port, lun);
        return 0;
 }
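
Export accounting drops the dev_export_obj atomic in favor of a plain export_count incremented on export and decremented on unexport, both under hba->device_lock. A userspace sketch of the same pairing, with a pthread mutex standing in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    /* Sketch of the reworked export accounting: a plain counter under
     * the HBA lock instead of a standalone atomic. */
    struct se_hba { pthread_mutex_t device_lock; };
    struct se_device { struct se_hba *se_hba; int export_count; };

    static void core_dev_export(struct se_device *dev)
    {
            pthread_mutex_lock(&dev->se_hba->device_lock);
            dev->export_count++;
            pthread_mutex_unlock(&dev->se_hba->device_lock);
    }

    static void core_dev_unexport(struct se_device *dev)
    {
            pthread_mutex_lock(&dev->se_hba->device_lock);
            dev->export_count--;
            pthread_mutex_unlock(&dev->se_hba->device_lock);
    }

    int main(void)
    {
            struct se_hba hba = { PTHREAD_MUTEX_INITIALIZER };
            struct se_device dev = { &hba, 0 };

            core_dev_export(&dev);
            printf("exports: %d\n", dev.export_count);   /* 1 */
            core_dev_unexport(&dev);
            printf("exports: %d\n", dev.export_count);   /* 0 */
            return 0;
    }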
@@ -644,6 +610,7 @@ void core_dev_unexport(
        struct se_portal_group *tpg,
        struct se_lun *lun)
 {
+       struct se_hba *hba = dev->se_hba;
        struct se_port *port = lun->lun_sep;
 
        spin_lock(&lun->lun_sep_lock);
@@ -654,198 +621,27 @@ void core_dev_unexport(
        spin_unlock(&lun->lun_sep_lock);
 
        spin_lock(&dev->se_port_lock);
-       atomic_dec(&dev->dev_export_obj.obj_access_count);
        core_release_port(dev, port);
        spin_unlock(&dev->se_port_lock);
 
-       se_dev_stop(dev);
-       lun->lun_se_dev = NULL;
-}
-
-int target_report_luns(struct se_cmd *se_cmd)
-{
-       struct se_dev_entry *deve;
-       struct se_session *se_sess = se_cmd->se_sess;
-       unsigned char *buf;
-       u32 lun_count = 0, offset = 8, i;
-
-       if (se_cmd->data_length < 16) {
-               pr_warn("REPORT LUNS allocation length %u too small\n",
-                       se_cmd->data_length);
-               se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
-
-       buf = transport_kmap_data_sg(se_cmd);
-       if (!buf)
-               return -ENOMEM;
-
-       /*
-        * If no struct se_session pointer is present, this struct se_cmd is
-        * coming via a target_core_mod PASSTHROUGH op, and not through
-        * a $FABRIC_MOD.  In that case, report LUN=0 only.
-        */
-       if (!se_sess) {
-               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-               lun_count = 1;
-               goto done;
-       }
-
-       spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
-       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-               deve = se_sess->se_node_acl->device_list[i];
-               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-                       continue;
-               /*
-                * We determine the correct LUN LIST LENGTH even once we
-                * have reached the initial allocation length.
-                * See SPC2-R20 7.19.
-                */
-               lun_count++;
-               if ((offset + 8) > se_cmd->data_length)
-                       continue;
-
-               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
-               offset += 8;
-       }
-       spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
-
-       /*
-        * See SPC3 r07, page 159.
-        */
-done:
-       lun_count *= 8;
-       buf[0] = ((lun_count >> 24) & 0xff);
-       buf[1] = ((lun_count >> 16) & 0xff);
-       buf[2] = ((lun_count >> 8) & 0xff);
-       buf[3] = (lun_count & 0xff);
-       transport_kunmap_data_sg(se_cmd);
-
-       target_complete_cmd(se_cmd, GOOD);
-       return 0;
-}
-
-/*     se_release_device_for_hba():
- *
- *
- */
-void se_release_device_for_hba(struct se_device *dev)
-{
-       struct se_hba *hba = dev->se_hba;
-
-       if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
-           (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
-           (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
-           (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
-           (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
-               se_dev_stop(dev);
-
-       if (dev->dev_ptr) {
-               destroy_workqueue(dev->tmr_wq);
-               if (dev->transport->free_device)
-                       dev->transport->free_device(dev->dev_ptr);
-       }
-
        spin_lock(&hba->device_lock);
-       list_del(&dev->dev_list);
-       hba->dev_count--;
+       dev->export_count--;
        spin_unlock(&hba->device_lock);
 
-       core_scsi3_free_all_registrations(dev);
-       se_release_vpd_for_dev(dev);
-
-       kfree(dev);
+       lun->lun_se_dev = NULL;
 }
 
-void se_release_vpd_for_dev(struct se_device *dev)
+static void se_release_vpd_for_dev(struct se_device *dev)
 {
        struct t10_vpd *vpd, *vpd_tmp;
 
-       spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
+       spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
-                       &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
+                       &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
-       spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
-}
-
-/*     se_free_virtual_device():
- *
- *     Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
- */
-int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
-{
-       if (!list_empty(&dev->dev_sep_list))
-               dump_stack();
-
-       core_alua_free_lu_gp_mem(dev);
-       se_release_device_for_hba(dev);
-
-       return 0;
-}
-
-static void se_dev_start(struct se_device *dev)
-{
-       struct se_hba *hba = dev->se_hba;
-
-       spin_lock(&hba->device_lock);
-       atomic_inc(&dev->dev_obj.obj_access_count);
-       if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
-               if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
-                       dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
-                       dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
-               } else if (dev->dev_status &
-                          TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
-                       dev->dev_status &=
-                               ~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
-                       dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
-               }
-       }
-       spin_unlock(&hba->device_lock);
-}
-
-static void se_dev_stop(struct se_device *dev)
-{
-       struct se_hba *hba = dev->se_hba;
-
-       spin_lock(&hba->device_lock);
-       atomic_dec(&dev->dev_obj.obj_access_count);
-       if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
-               if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
-                       dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
-                       dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
-               } else if (dev->dev_status &
-                          TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
-                       dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
-                       dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
-               }
-       }
-       spin_unlock(&hba->device_lock);
-}
-
-int se_dev_check_online(struct se_device *dev)
-{
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&dev->dev_status_lock, flags);
-       ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
-              (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
-       spin_unlock_irqrestore(&dev->dev_status_lock, flags);
-
-       return ret;
-}
-
-int se_dev_check_shutdown(struct se_device *dev)
-{
-       int ret;
-
-       spin_lock_irq(&dev->dev_status_lock);
-       ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
-       spin_unlock_irq(&dev->dev_status_lock);
-
-       return ret;
+       spin_unlock(&dev->t10_wwn.t10_vpd_lock);
 }
 
 static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
@@ -866,72 +662,13 @@ static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
        return aligned_max_sectors;
 }
 
-void se_dev_set_default_attribs(
-       struct se_device *dev,
-       struct se_dev_limits *dev_limits)
-{
-       struct queue_limits *limits = &dev_limits->limits;
-
-       dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
-       dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
-       dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
-       dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
-       dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
-       dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
-       dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
-       dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
-       dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
-       dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
-       dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
-       dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
-       dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
-       /*
-        * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
-        * iblock_create_virtdevice() from struct queue_limits values
-        * if blk_queue_discard()==1
-        */
-       dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
-       dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
-               DA_MAX_UNMAP_BLOCK_DESC_COUNT;
-       dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
-       dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
-                               DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
-       /*
-        * block_size is based on subsystem plugin dependent requirements.
-        */
-       dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
-       dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
-       /*
-        * Align max_hw_sectors down to PAGE_SIZE I/O transfers
-        */
-       limits->max_hw_sectors = se_dev_align_max_sectors(limits->max_hw_sectors,
-                                               limits->logical_block_size);
-       dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
-
-       /*
-        * Set fabric_max_sectors, which is reported in block limits
-        * VPD page (B0h).
-        */
-       dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       /*
-        * Set optimal_sectors from fabric_max_sectors, which can be
-        * lowered via configfs.
-        */
-       dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
-       /*
-        * queue_depth is based on subsystem plugin dependent requirements.
-        */
-       dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
-       dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
-}
-
 int se_dev_set_max_unmap_lba_count(
        struct se_device *dev,
        u32 max_unmap_lba_count)
 {
-       dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+       dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
        pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
+                       dev, dev->dev_attrib.max_unmap_lba_count);
        return 0;
 }
 
@@ -939,10 +676,10 @@ int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
        u32 max_unmap_block_desc_count)
 {
-       dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+       dev->dev_attrib.max_unmap_block_desc_count =
                max_unmap_block_desc_count;
        pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
+                       dev, dev->dev_attrib.max_unmap_block_desc_count);
        return 0;
 }
 
@@ -950,9 +687,9 @@ int se_dev_set_unmap_granularity(
        struct se_device *dev,
        u32 unmap_granularity)
 {
-       dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+       dev->dev_attrib.unmap_granularity = unmap_granularity;
        pr_debug("dev[%p]: Set unmap_granularity: %u\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
+                       dev, dev->dev_attrib.unmap_granularity);
        return 0;
 }
 
@@ -960,9 +697,19 @@ int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
        u32 unmap_granularity_alignment)
 {
-       dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+       dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
        pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
+                       dev, dev->dev_attrib.unmap_granularity_alignment);
+       return 0;
+}
+
+int se_dev_set_max_write_same_len(
+       struct se_device *dev,
+       u32 max_write_same_len)
+{
+       dev->dev_attrib.max_write_same_len = max_write_same_len;
+       pr_debug("dev[%p]: Set max_write_same_len: %u\n",
+                       dev, dev->dev_attrib.max_write_same_len);
        return 0;
 }
 
@@ -993,9 +740,9 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                pr_err("emulate_fua_write not supported for pSCSI\n");
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+       dev->dev_attrib.emulate_fua_write = flag;
        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
+                       dev, dev->dev_attrib.emulate_fua_write);
        return 0;
 }
 
@@ -1025,9 +772,9 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("emulate_write_cache not supported for pSCSI\n");
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+       dev->dev_attrib.emulate_write_cache = flag;
        pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
+                       dev, dev->dev_attrib.emulate_write_cache);
        return 0;
 }
 
@@ -1038,16 +785,15 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
-                       " UA_INTRLCK_CTRL while dev_export_obj: %d count"
-                       " exists\n", dev,
-                       atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " UA_INTRLCK_CTRL while export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+       dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
        pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
-               dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
+               dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
 
        return 0;
 }
@@ -1059,15 +805,15 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device TAS while"
-                       " dev_export_obj: %d count exists\n", dev,
-                       atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+       dev->dev_attrib.emulate_tas = flag;
        pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
-               dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
+               dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
 
        return 0;
 }
@@ -1082,12 +828,12 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
-       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
 
-       dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+       dev->dev_attrib.emulate_tpu = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
                                dev, flag);
        return 0;
@@ -1103,12 +849,12 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard support is detected in iblock_create_virtdevice().
         */
-       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
 
-       dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+       dev->dev_attrib.emulate_tpws = flag;
        pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
                                dev, flag);
        return 0;
@@ -1120,9 +866,9 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+       dev->dev_attrib.enforce_pr_isids = flag;
        pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
-               (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+               (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
        return 0;
 }
 
@@ -1132,7 +878,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
                printk(KERN_ERR "Illegal value %d\n", flag);
                return -EINVAL;
        }
-       dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+       dev->dev_attrib.is_nonrot = flag;
        pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
               dev, flag);
        return 0;
@@ -1145,7 +891,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
                        " reordering not implemented\n", dev);
                return -ENOSYS;
        }
-       dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+       dev->dev_attrib.emulate_rest_reord = flag;
        pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
        return 0;
 }
@@ -1155,10 +901,10 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
  */
 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device TCQ while"
-                       " dev_export_obj: %d count exists\n", dev,
-                       atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
        if (!queue_depth) {
@@ -1168,26 +914,26 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
        }
 
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+               if (queue_depth > dev->dev_attrib.hw_queue_depth) {
                        pr_err("dev[%p]: Passed queue_depth: %u"
                                " exceeds TCM/SE_Device TCQ: %u\n",
                                dev, queue_depth,
-                               dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+                               dev->dev_attrib.hw_queue_depth);
                        return -EINVAL;
                }
        } else {
-               if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
-                       if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+               if (queue_depth > dev->dev_attrib.queue_depth) {
+                       if (queue_depth > dev->dev_attrib.hw_queue_depth) {
                                pr_err("dev[%p]: Passed queue_depth:"
                                        " %u exceeds TCM/SE_Device MAX"
                                        " TCQ: %u\n", dev, queue_depth,
-                                       dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+                                       dev->dev_attrib.hw_queue_depth);
                                return -EINVAL;
                        }
                }
        }
 
-       dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+       dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
@@ -1195,10 +941,10 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
-                       " fabric_max_sectors while dev_export_obj: %d count exists\n",
-                       dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " fabric_max_sectors while export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
        if (!fabric_max_sectors) {
@@ -1213,11 +959,11 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+               if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
                        pr_err("dev[%p]: Passed fabric_max_sectors: %u"
                                " greater than TCM/SE_Device max_sectors:"
                                " %u\n", dev, fabric_max_sectors,
-                               dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+                               dev->dev_attrib.hw_max_sectors);
                         return -EINVAL;
                }
        } else {
@@ -1233,9 +979,9 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
         */
        fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
-                                                     dev->se_sub_dev->se_dev_attrib.block_size);
+                                                     dev->dev_attrib.block_size);
 
-       dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
+       dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
                        dev, fabric_max_sectors);
        return 0;
@@ -1243,10 +989,10 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device"
-                       " optimal_sectors while dev_export_obj: %d count exists\n",
-                       dev, atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " optimal_sectors while export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
@@ -1254,14 +1000,14 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                                " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
+                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
                return -EINVAL;
        }
 
-       dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+       dev->dev_attrib.optimal_sectors = optimal_sectors;
        pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
                        dev, optimal_sectors);
        return 0;
@@ -1269,10 +1015,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 
 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 {
-       if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
+       if (dev->export_count) {
                pr_err("dev[%p]: Unable to change SE Device block_size"
-                       " while dev_export_obj: %d count exists\n", dev,
-                       atomic_read(&dev->dev_export_obj.obj_access_count));
+                       " while export_count is %d\n",
+                       dev, dev->export_count);
                return -EINVAL;
        }
 
@@ -1293,7 +1039,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
                return -EINVAL;
        }
 
-       dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+       dev->dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
        return 0;
@@ -1307,12 +1053,6 @@ struct se_lun *core_dev_add_lun(
        struct se_lun *lun_p;
        int rc;
 
-       if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
-               pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
-                       atomic_read(&dev->dev_access_obj.obj_access_count));
-               return ERR_PTR(-EACCES);
-       }
-
        lun_p = core_tpg_pre_addlun(tpg, lun);
        if (IS_ERR(lun_p))
                return lun_p;
@@ -1568,12 +1308,211 @@ void core_dev_free_initiator_node_lun_acl(
        kfree(lacl);
 }
 
+static void scsi_dump_inquiry(struct se_device *dev)
+{
+       struct t10_wwn *wwn = &dev->t10_wwn;
+       char buf[17];
+       int i, device_type;
+       /*
+        * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
+        */
+       for (i = 0; i < 8; i++)
+               if (wwn->vendor[i] >= 0x20)
+                       buf[i] = wwn->vendor[i];
+               else
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Vendor: %s\n", buf);
+
+       for (i = 0; i < 16; i++)
+               if (wwn->model[i] >= 0x20)
+                       buf[i] = wwn->model[i];
+               else
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Model: %s\n", buf);
+
+       for (i = 0; i < 4; i++)
+               if (wwn->revision[i] >= 0x20)
+                       buf[i] = wwn->revision[i];
+               else
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Revision: %s\n", buf);
+
+       device_type = dev->transport->get_device_type(dev);
+       pr_debug("  Type:   %s ", scsi_device_type(device_type));
+}
+
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+{
+       struct se_device *dev;
+
+       dev = hba->transport->alloc_device(hba, name);
+       if (!dev)
+               return NULL;
+
+       dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+       dev->se_hba = hba;
+       dev->transport = hba->transport;
+
+       INIT_LIST_HEAD(&dev->dev_list);
+       INIT_LIST_HEAD(&dev->dev_sep_list);
+       INIT_LIST_HEAD(&dev->dev_tmr_list);
+       INIT_LIST_HEAD(&dev->delayed_cmd_list);
+       INIT_LIST_HEAD(&dev->state_list);
+       INIT_LIST_HEAD(&dev->qf_cmd_list);
+       spin_lock_init(&dev->stats_lock);
+       spin_lock_init(&dev->execute_task_lock);
+       spin_lock_init(&dev->delayed_cmd_lock);
+       spin_lock_init(&dev->dev_reservation_lock);
+       spin_lock_init(&dev->se_port_lock);
+       spin_lock_init(&dev->se_tmr_lock);
+       spin_lock_init(&dev->qf_cmd_lock);
+       atomic_set(&dev->dev_ordered_id, 0);
+       INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
+       spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
+       INIT_LIST_HEAD(&dev->t10_pr.registration_list);
+       INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
+       spin_lock_init(&dev->t10_pr.registration_lock);
+       spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
+       INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
+       spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+
+       dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+       dev->t10_wwn.t10_dev = dev;
+       dev->t10_alua.t10_dev = dev;
+
+       dev->dev_attrib.da_dev = dev;
+       dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+       dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+       dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+       dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+       dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+       dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
+       dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+       dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+       dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+       dev->dev_attrib.is_nonrot = DA_IS_NONROT;
+       dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
+       dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+       dev->dev_attrib.max_unmap_block_desc_count =
+               DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+       dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+       dev->dev_attrib.unmap_granularity_alignment =
+                               DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
+       dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
+       dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
+       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
+
+       return dev;
+}
+
+int target_configure_device(struct se_device *dev)
+{
+       struct se_hba *hba = dev->se_hba;
+       int ret;
+
+       if (dev->dev_flags & DF_CONFIGURED) {
+               pr_err("se_device already configured for storage"
+                               " object\n");
+               return -EEXIST;
+       }
+
+       ret = dev->transport->configure_device(dev);
+       if (ret)
+               goto out;
+       dev->dev_flags |= DF_CONFIGURED;
+
+       /*
+        * XXX: there is not much point in having two different values here.
+        */
+       dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
+       dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
+
+       /*
+        * Align max_hw_sectors down to PAGE_SIZE I/O transfers
+        */
+       dev->dev_attrib.hw_max_sectors =
+               se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
+                                        dev->dev_attrib.hw_block_size);
+
+       dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
+       dev->creation_time = get_jiffies_64();
+
+       ret = core_setup_alua(dev);
+       if (ret)
+               goto out;
+
+       /*
+        * Set up the TMR workqueue for this struct se_device
+        */
+       dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+                                     dev->transport->name);
+       if (!dev->tmr_wq) {
+               pr_err("Unable to create tmr workqueue for %s\n",
+                       dev->transport->name);
+               ret = -ENOMEM;
+               goto out_free_alua;
+       }
+
+       /*
+        * Setup work_queue for QUEUE_FULL
+        */
+       INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
+       /*
+        * Preload the initial INQUIRY const values if we are doing
+        * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
+        * passthrough because this is being provided by the backend LLD.
+        */
+       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+               strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+               strncpy(&dev->t10_wwn.model[0],
+                       dev->transport->inquiry_prod, 16);
+               strncpy(&dev->t10_wwn.revision[0],
+                       dev->transport->inquiry_rev, 4);
+       }
+
+       scsi_dump_inquiry(dev);
+
+       spin_lock(&hba->device_lock);
+       hba->dev_count++;
+       spin_unlock(&hba->device_lock);
+       return 0;
+
+out_free_alua:
+       core_alua_free_lu_gp_mem(dev);
+out:
+       se_release_vpd_for_dev(dev);
+       return ret;
+}
+
+void target_free_device(struct se_device *dev)
+{
+       struct se_hba *hba = dev->se_hba;
+
+       WARN_ON(!list_empty(&dev->dev_sep_list));
+
+       if (dev->dev_flags & DF_CONFIGURED) {
+               destroy_workqueue(dev->tmr_wq);
+
+               spin_lock(&hba->device_lock);
+               hba->dev_count--;
+               spin_unlock(&hba->device_lock);
+       }
+
+       core_alua_free_lu_gp_mem(dev);
+       core_scsi3_free_all_registrations(dev);
+       se_release_vpd_for_dev(dev);
+
+       dev->transport->free_device(dev);
+}
+
 int core_dev_setup_virtual_lun0(void)
 {
        struct se_hba *hba;
        struct se_device *dev;
-       struct se_subsystem_dev *se_dev = NULL;
-       struct se_subsystem_api *t;
        char buf[16];
        int ret;
 
@@ -1581,60 +1520,28 @@ int core_dev_setup_virtual_lun0(void)
        if (IS_ERR(hba))
                return PTR_ERR(hba);
 
-       lun0_hba = hba;
-       t = hba->transport;
-
-       se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
-       if (!se_dev) {
-               pr_err("Unable to allocate memory for"
-                               " struct se_subsystem_dev\n");
+       dev = target_alloc_device(hba, "virt_lun0");
+       if (!dev) {
                ret = -ENOMEM;
-               goto out;
+               goto out_free_hba;
        }
-       INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
-       spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
-       INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
-       INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
-       spin_lock_init(&se_dev->t10_pr.registration_lock);
-       spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
-       INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
-       spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
-       spin_lock_init(&se_dev->se_dev_lock);
-       se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
-       se_dev->t10_wwn.t10_sub_dev = se_dev;
-       se_dev->t10_alua.t10_sub_dev = se_dev;
-       se_dev->se_dev_attrib.da_sub_dev = se_dev;
-       se_dev->se_dev_hba = hba;
-
-       se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
-       if (!se_dev->se_dev_su_ptr) {
-               pr_err("Unable to locate subsystem dependent pointer"
-                       " from allocate_virtdevice()\n");
-               ret = -ENOMEM;
-               goto out;
-       }
-       lun0_su_dev = se_dev;
 
        memset(buf, 0, 16);
        sprintf(buf, "rd_pages=8");
-       t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
+       hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
 
-       dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
-       if (IS_ERR(dev)) {
-               ret = PTR_ERR(dev);
-               goto out;
-       }
-       se_dev->se_dev_ptr = dev;
-       g_lun0_dev = dev;
+       ret = target_configure_device(dev);
+       if (ret)
+               goto out_free_se_dev;
 
+       lun0_hba = hba;
+       g_lun0_dev = dev;
        return 0;
-out:
-       lun0_su_dev = NULL;
-       kfree(se_dev);
-       if (lun0_hba) {
-               core_delete_hba(lun0_hba);
-               lun0_hba = NULL;
-       }
+
+out_free_se_dev:
+       target_free_device(dev);
+out_free_hba:
+       core_delete_hba(hba);
        return ret;
 }
 
@@ -1642,14 +1549,11 @@ out:
 void core_dev_release_virtual_lun0(void)
 {
        struct se_hba *hba = lun0_hba;
-       struct se_subsystem_dev *su_dev = lun0_su_dev;
 
        if (!hba)
                return;
 
        if (g_lun0_dev)
-               se_free_virtual_device(g_lun0_dev, hba);
-
-       kfree(su_dev);
+               target_free_device(g_lun0_dev);
        core_delete_hba(hba);
 }
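
A condensed sketch of the lifecycle the hunks above convert core_dev_setup_virtual_lun0() to, and which any backend now follows (device name and parameter buffer illustrative, not from the patch):

        struct se_device *dev;
        int ret;

        dev = target_alloc_device(hba, "some_dev");     /* backend ->alloc_device() */
        if (!dev)
                return -ENOMEM;

        hba->transport->set_configfs_dev_params(dev, params, strlen(params));

        ret = target_configure_device(dev);             /* backend ->configure_device() */
        if (ret) {
                target_free_device(dev);        /* safe even before DF_CONFIGURED is set */
                return ret;
        }
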
index bca737b..810263d 100644 (file)
@@ -4,10 +4,9 @@
  * This file contains generic fabric module configfs infrastructure for
  * TCM v4.x code
  *
- * Copyright (c) 2010,2011 Rising Tide Systems
- * Copyright (c) 2010,2011 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
- * Copyright (c) Nicholas A. Bellinger <nab@linux-iscsi.org>
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -71,6 +70,12 @@ static int target_fabric_mappedlun_link(
        struct se_portal_group *se_tpg;
        struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
        int ret = 0, lun_access;
+
+       if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
+               pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
+                       " %p to struct lun: %p\n", lun_ci, lun);
+               return -EFAULT;
+       }
        /*
         * Ensure that the source port exists
         */
@@ -358,7 +363,7 @@ static struct config_group *target_fabric_make_mappedlun(
        }
 
        lacl_cg = &lacl->se_lun_group;
-       lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!lacl_cg->default_groups) {
                pr_err("Unable to allocate lacl_cg->default_groups\n");
@@ -374,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun(
        lacl_cg->default_groups[1] = NULL;
 
        ml_stat_grp = &lacl->ml_stat_grps.stat_group;
-       ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
+       ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
                                GFP_KERNEL);
        if (!ml_stat_grp->default_groups) {
                pr_err("Unable to allocate ml_stat_grp->default_groups\n");
@@ -734,17 +739,21 @@ static int target_fabric_port_link(
        struct config_item *se_dev_ci)
 {
        struct config_item *tpg_ci;
-       struct se_device *dev;
        struct se_lun *lun = container_of(to_config_group(lun_ci),
                                struct se_lun, lun_group);
        struct se_lun *lun_p;
        struct se_portal_group *se_tpg;
-       struct se_subsystem_dev *se_dev = container_of(
-                               to_config_group(se_dev_ci), struct se_subsystem_dev,
-                               se_dev_group);
+       struct se_device *dev =
+               container_of(to_config_group(se_dev_ci), struct se_device, dev_group);
        struct target_fabric_configfs *tf;
        int ret;
 
+       if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
+               pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
+                       " %p to struct se_device: %p\n", se_dev_ci, dev);
+               return -EFAULT;
+       }
+
        tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
        se_tpg = container_of(to_config_group(tpg_ci),
                                struct se_portal_group, tpg_group);
@@ -755,14 +764,6 @@ static int target_fabric_port_link(
                return -EEXIST;
        }
 
-       dev = se_dev->se_dev_ptr;
-       if (!dev) {
-               pr_err("Unable to locate struct se_device pointer from"
-                       " %s\n", config_item_name(se_dev_ci));
-               ret = -ENODEV;
-               goto out;
-       }
-
        lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
        if (IS_ERR(lun_p)) {
                pr_err("core_dev_add_lun() failed\n");
@@ -869,7 +870,7 @@ static struct config_group *target_fabric_make_lun(
                return ERR_PTR(-EINVAL);
 
        lun_cg = &lun->lun_group;
-       lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+       lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
                                GFP_KERNEL);
        if (!lun_cg->default_groups) {
                pr_err("Unable to allocate lun_cg->default_groups\n");
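
The kzalloc() to kmalloc() conversions above also correct the element size: default_groups is a NULL-terminated array of pointers, so sizing the allocation by sizeof(struct config_group) over-allocated, and zeroing is redundant once every slot, terminator included, is assigned. A minimal sketch of the corrected pattern (the group name is invented for illustration):

        struct config_group **grps;

        grps = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL);
        if (!grps)
                return -ENOMEM;
        grps[0] = &example_group;       /* the one real entry */
        grps[1] = NULL;                 /* terminator; every slot is written, so kzalloc buys nothing */
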
index e460d62..687b0b0 100644 (file)
@@ -4,8 +4,7 @@
  * This file contains generic high level protocol identifier and PR
  * handlers for TCM fabric modules
  *
- * Copyright (c) 2010 Rising Tide Systems, Inc.
- * Copyright (c) 2010 Linux-iSCSI.org
+ * (c) Copyright 2010-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
index 0360383..b9c8849 100644 (file)
@@ -3,10 +3,7 @@
  *
  * This file contains the Storage Engine <-> FILEIO transport specific functions
  *
- * Copyright (c) 2005 PyX Technologies, Inc.
- * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2005-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 
 #include "target_core_file.h"
 
-static struct se_subsystem_api fileio_template;
+static inline struct fd_dev *FD_DEV(struct se_device *dev)
+{
+       return container_of(dev, struct fd_dev, dev);
+}
 
 /*     fd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -82,7 +82,7 @@ static void fd_detach_hba(struct se_hba *hba)
        hba->hba_ptr = NULL;
 }
 
-static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
 {
        struct fd_dev *fd_dev;
        struct fd_host *fd_host = hba->hba_ptr;
@@ -97,34 +97,28 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 
        pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
 
-       return fd_dev;
+       return &fd_dev->dev;
 }
 
-/*     fd_create_virtdevice(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static struct se_device *fd_create_virtdevice(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       void *p)
+static int fd_configure_device(struct se_device *dev)
 {
-       struct se_device *dev;
-       struct se_dev_limits dev_limits;
-       struct queue_limits *limits;
-       struct fd_dev *fd_dev = p;
-       struct fd_host *fd_host = hba->hba_ptr;
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct fd_host *fd_host = dev->se_hba->hba_ptr;
        struct file *file;
        struct inode *inode = NULL;
-       int dev_flags = 0, flags, ret = -EINVAL;
+       int flags, ret = -EINVAL;
 
-       memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+       if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
+               pr_err("Missing fd_dev_name=\n");
+               return -EINVAL;
+       }
 
        /*
         * Use O_DSYNC by default instead of O_SYNC to forgo syncing
         * of pure timestamp updates.
         */
        flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+
        /*
         * Optionally allow fd_buffered_io=1 to be enabled for people
         * who want to use the fs buffer cache as a WriteCache mechanism.
@@ -154,22 +148,17 @@ static struct se_device *fd_create_virtdevice(
         */
        inode = file->f_mapping->host;
        if (S_ISBLK(inode->i_mode)) {
-               struct request_queue *q;
+               struct request_queue *q = bdev_get_queue(inode->i_bdev);
                unsigned long long dev_size;
-               /*
-                * Setup the local scope queue_limits from struct request_queue->limits
-                * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-                */
-               q = bdev_get_queue(inode->i_bdev);
-               limits = &dev_limits.limits;
-               limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
-               limits->max_hw_sectors = queue_max_hw_sectors(q);
-               limits->max_sectors = queue_max_sectors(q);
+
+               dev->dev_attrib.hw_block_size =
+                       bdev_logical_block_size(inode->i_bdev);
+               dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
+
                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device
                 */
-               fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
                dev_size = (i_size_read(file->f_mapping->host) -
                                       fd_dev->fd_block_size);
 
@@ -185,26 +174,18 @@ static struct se_device *fd_create_virtdevice(
                        goto fail;
                }
 
-               limits = &dev_limits.limits;
-               limits->logical_block_size = FD_BLOCKSIZE;
-               limits->max_hw_sectors = FD_MAX_SECTORS;
-               limits->max_sectors = FD_MAX_SECTORS;
-               fd_dev->fd_block_size = FD_BLOCKSIZE;
+               dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+               dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
        }
 
-       dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
-       dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
+       fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
 
-       dev = transport_add_device_to_core_hba(hba, &fileio_template,
-                               se_dev, dev_flags, fd_dev,
-                               &dev_limits, "FILEIO", FD_VERSION);
-       if (!dev)
-               goto fail;
+       dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
                pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
                        " with FDBD_HAS_BUFFERED_IO_WCE\n");
-               dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
+               dev->dev_attrib.emulate_write_cache = 1;
        }
 
        fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
@@ -214,22 +195,18 @@ static struct se_device *fd_create_virtdevice(
                " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
                        fd_dev->fd_dev_name, fd_dev->fd_dev_size);
 
-       return dev;
+       return 0;
 fail:
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }
-       return ERR_PTR(ret);
+       return ret;
 }
 
-/*     fd_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void fd_free_device(void *p)
+static void fd_free_device(struct se_device *dev)
 {
-       struct fd_dev *fd_dev = p;
+       struct fd_dev *fd_dev = FD_DEV(dev);
 
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
@@ -239,17 +216,16 @@ static void fd_free_device(void *p)
        kfree(fd_dev);
 }
 
-static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
-               u32 sgl_nents)
+static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
+               u32 sgl_nents, int is_write)
 {
        struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *dev = se_dev->dev_ptr;
+       struct fd_dev *dev = FD_DEV(se_dev);
        struct file *fd = dev->fd_file;
        struct scatterlist *sg;
        struct iovec *iov;
        mm_segment_t old_fs;
-       loff_t pos = (cmd->t_task_lba *
-                     se_dev->se_sub_dev->se_dev_attrib.block_size);
+       loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
        int ret = 0, i;
 
        iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
@@ -260,81 +236,58 @@ static int fd_do_readv(struct se_cmd *cmd, struct scatterlist *sgl,
 
        for_each_sg(sgl, sg, sgl_nents, i) {
                iov[i].iov_len = sg->length;
-               iov[i].iov_base = sg_virt(sg);
+               iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
        }
 
        old_fs = get_fs();
        set_fs(get_ds());
-       ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
+       if (is_write)
+               ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+       else
+               ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
+
        set_fs(old_fs);
 
+       for_each_sg(sgl, sg, sgl_nents, i)
+               kunmap(sg_page(sg));
+
        kfree(iov);
-       /*
-        * Return zeros and GOOD status even if the READ did not return
-        * the expected virt_size for struct file w/o a backing struct
-        * block_device.
-        */
-       if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+
+       if (is_write) {
                if (ret < 0 || ret != cmd->data_length) {
-                       pr_err("vfs_readv() returned %d,"
-                               " expecting %d for S_ISBLK\n", ret,
-                               (int)cmd->data_length);
+                       pr_err("%s() write returned %d\n", __func__, ret);
                        return (ret < 0 ? ret : -EINVAL);
                }
        } else {
-               if (ret < 0) {
-                       pr_err("vfs_readv() returned %d for non"
-                               " S_ISBLK\n", ret);
-                       return ret;
+               /*
+                * Return zeros and GOOD status even if the READ did not return
+                * the expected virt_size for struct file w/o a backing struct
+                * block_device.
+                */
+               if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
+                       if (ret < 0 || ret != cmd->data_length) {
+                               pr_err("%s() returned %d, expecting %u for "
+                                               "S_ISBLK\n", __func__, ret,
+                                               cmd->data_length);
+                               return (ret < 0 ? ret : -EINVAL);
+                       }
+               } else {
+                       if (ret < 0) {
+                               pr_err("%s() returned %d for non S_ISBLK\n",
+                                               __func__, ret);
+                               return ret;
+                       }
                }
        }
-
-       return 1;
-}
-
-static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
-               u32 sgl_nents)
-{
-       struct se_device *se_dev = cmd->se_dev;
-       struct fd_dev *dev = se_dev->dev_ptr;
-       struct file *fd = dev->fd_file;
-       struct scatterlist *sg;
-       struct iovec *iov;
-       mm_segment_t old_fs;
-       loff_t pos = (cmd->t_task_lba *
-                     se_dev->se_sub_dev->se_dev_attrib.block_size);
-       int ret, i = 0;
-
-       iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
-       if (!iov) {
-               pr_err("Unable to allocate fd_do_writev iov[]\n");
-               return -ENOMEM;
-       }
-
-       for_each_sg(sgl, sg, sgl_nents, i) {
-               iov[i].iov_len = sg->length;
-               iov[i].iov_base = sg_virt(sg);
-       }
-
-       old_fs = get_fs();
-       set_fs(get_ds());
-       ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
-       set_fs(old_fs);
-
-       kfree(iov);
-
-       if (ret < 0 || ret != cmd->data_length) {
-               pr_err("vfs_writev() returned %d\n", ret);
-               return (ret < 0 ? ret : -EINVAL);
-       }
-
        return 1;
 }
 
-static int fd_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_sync_cache(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct fd_dev *fd_dev = dev->dev_ptr;
+       struct fd_dev *fd_dev = FD_DEV(dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        loff_t start, end;
        int ret;
@@ -353,7 +306,7 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
                start = 0;
                end = LLONG_MAX;
        } else {
-               start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+               start = cmd->t_task_lba * dev->dev_attrib.block_size;
                if (cmd->data_length)
                        end = start + cmd->data_length;
                else
@@ -367,17 +320,16 @@ static int fd_execute_sync_cache(struct se_cmd *cmd)
        if (immed)
                return 0;
 
-       if (ret) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       if (ret)
                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-       } else {
+       else
                target_complete_cmd(cmd, SAM_STAT_GOOD);
-       }
 
        return 0;
 }
 
-static int fd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+fd_execute_rw(struct se_cmd *cmd)
 {
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
@@ -390,30 +342,29 @@ static int fd_execute_rw(struct se_cmd *cmd)
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
-               ret = fd_do_readv(cmd, sgl, sgl_nents);
+               ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
        } else {
-               ret = fd_do_writev(cmd, sgl, sgl_nents);
+               ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
                /*
                 * Perform implicit vfs_fsync_range() for fd_do_rw() ops
                 * for SCSI WRITEs with Forced Unit Access (FUA) set.
                 * Allow this to happen independent of WCE=0 setting.
                 */
                if (ret > 0 &&
-                   dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+                   dev->dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA)) {
-                       struct fd_dev *fd_dev = dev->dev_ptr;
+                       struct fd_dev *fd_dev = FD_DEV(dev);
                        loff_t start = cmd->t_task_lba *
-                               dev->se_sub_dev->se_dev_attrib.block_size;
+                               dev->dev_attrib.block_size;
                        loff_t end = start + cmd->data_length;
 
                        vfs_fsync_range(fd_dev->fd_file, start, end, 1);
                }
        }
 
-       if (ret < 0) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return ret;
-       }
+       if (ret < 0)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        if (ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
@@ -430,12 +381,10 @@ static match_table_t tokens = {
        {Opt_err, NULL}
 };
 
-static ssize_t fd_set_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       const char *page, ssize_t count)
+static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
+               const char *page, ssize_t count)
 {
-       struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+       struct fd_dev *fd_dev = FD_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;
@@ -502,24 +451,9 @@ out:
        return (!ret) ? count : ret;
 }
 
-static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-       struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
-
-       if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
-               pr_err("Missing fd_dev_name=\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t fd_show_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       char *b)
-{
-       struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+       struct fd_dev *fd_dev = FD_DEV(dev);
        ssize_t bl = 0;
 
        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
@@ -530,27 +464,9 @@ static ssize_t fd_show_configfs_dev_params(
        return bl;
 }
 
-/*     fd_get_device_rev(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_rev(struct se_device *dev)
-{
-       return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-/*     fd_get_device_type(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static u32 fd_get_device_type(struct se_device *dev)
-{
-       return TYPE_DISK;
-}
-
 static sector_t fd_get_blocks(struct se_device *dev)
 {
-       struct fd_dev *fd_dev = dev->dev_ptr;
+       struct fd_dev *fd_dev = FD_DEV(dev);
        struct file *f = fd_dev->fd_file;
        struct inode *i = f->f_mapping->host;
        unsigned long long dev_size;
@@ -564,34 +480,35 @@ static sector_t fd_get_blocks(struct se_device *dev)
        else
                dev_size = fd_dev->fd_dev_size;
 
-       return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
+       return div_u64(dev_size, dev->dev_attrib.block_size);
 }
 
-static struct spc_ops fd_spc_ops = {
+static struct sbc_ops fd_sbc_ops = {
        .execute_rw             = fd_execute_rw,
        .execute_sync_cache     = fd_execute_sync_cache,
 };
 
-static int fd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+fd_parse_cdb(struct se_cmd *cmd)
 {
-       return sbc_parse_cdb(cmd, &fd_spc_ops);
+       return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }
 
 static struct se_subsystem_api fileio_template = {
        .name                   = "fileio",
+       .inquiry_prod           = "FILEIO",
+       .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
-       .allocate_virtdevice    = fd_allocate_virtdevice,
-       .create_virtdevice      = fd_create_virtdevice,
+       .alloc_device           = fd_alloc_device,
+       .configure_device       = fd_configure_device,
        .free_device            = fd_free_device,
        .parse_cdb              = fd_parse_cdb,
-       .check_configfs_dev_params = fd_check_configfs_dev_params,
        .set_configfs_dev_params = fd_set_configfs_dev_params,
        .show_configfs_dev_params = fd_show_configfs_dev_params,
-       .get_device_rev         = fd_get_device_rev,
-       .get_device_type        = fd_get_device_type,
+       .get_device_type        = sbc_get_device_type,
        .get_blocks             = fd_get_blocks,
 };
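
One subtlety in the fd_do_readv()/fd_do_writev() merge above: the iovec bases switch from sg_virt() to kmap(). sg_virt() is only valid for pages with a permanent kernel mapping, so highmem scatterlist pages must be mapped around the vfs_readv()/vfs_writev() call and unmapped afterwards. The mapping shape, condensed from the new fd_do_rw():

        for_each_sg(sgl, sg, sgl_nents, i) {
                iov[i].iov_len  = sg->length;
                iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
        }

        ret = is_write ? vfs_writev(fd, &iov[0], sgl_nents, &pos) :
                         vfs_readv(fd, &iov[0], sgl_nents, &pos);

        for_each_sg(sgl, sg, sgl_nents, i)
                kunmap(sg_page(sg));
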
 
index 876ae53..bc02b01 100644 (file)
@@ -17,6 +17,8 @@
 #define FDBD_HAS_BUFFERED_IO_WCE 0x04
 
 struct fd_dev {
+       struct se_device dev;
+
        u32             fbd_flags;
        unsigned char   fd_dev_name[FD_MAX_DEV_NAME];
        /* Unique Ramdisk Device ID in Ramdisk HBA */
index 3dd1bd4..d2616cd 100644 (file)
@@ -3,10 +3,7 @@
  *
  * This file contains the TCM HBA Transport related functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -113,7 +110,6 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
                return ERR_PTR(-ENOMEM);
        }
 
-       INIT_LIST_HEAD(&hba->hba_dev_list);
        spin_lock_init(&hba->device_lock);
        mutex_init(&hba->hba_access_mutex);
 
@@ -152,8 +148,7 @@ out_free_hba:
 int
 core_delete_hba(struct se_hba *hba)
 {
-       if (!list_empty(&hba->hba_dev_list))
-               dump_stack();
+       WARN_ON(hba->dev_count);
 
        hba->transport->detach_hba(hba);
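
With hba_dev_list gone, leak checking at HBA teardown reduces to the dev_count that target_configure_device() and target_free_device() keep balanced under hba->device_lock; WARN_ON(hba->dev_count) fires if any configured device outlives its HBA. Condensed from the device hunks earlier in this series:

        spin_lock(&hba->device_lock);
        hba->dev_count++;               /* target_configure_device() */
        spin_unlock(&hba->device_lock);

        spin_lock(&hba->device_lock);
        hba->dev_count--;               /* target_free_device(), only if DF_CONFIGURED */
        spin_unlock(&hba->device_lock);
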
 
index 57d7674..b526d23 100644 (file)
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine  <-> Linux BlockIO transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 #define IBLOCK_MAX_BIO_PER_TASK         32     /* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE   128
 
-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+       return container_of(dev, struct iblock_dev, dev);
+}
+
 
-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;
 
 /*     iblock_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
 {
 }
 
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
        struct iblock_dev *ib_dev = NULL;
 
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
 
        pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
-       return ib_dev;
+       return &ib_dev->dev;
 }
 
-static struct se_device *iblock_create_virtdevice(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-       struct iblock_dev *ib_dev = p;
-       struct se_device *dev;
-       struct se_dev_limits dev_limits;
-       struct block_device *bd = NULL;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
-       struct queue_limits *limits;
-       u32 dev_flags = 0;
+       struct block_device *bd = NULL;
        fmode_t mode;
-       int ret = -EINVAL;
+       int ret = -ENOMEM;
 
-       if (!ib_dev) {
-               pr_err("Unable to locate struct iblock_dev parameter\n");
-               return ERR_PTR(ret);
+       if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+               pr_err("Missing udev_path= parameters for IBLOCK\n");
+               return -EINVAL;
        }
-       memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 
        ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
        if (!ib_dev->ibd_bio_set) {
-               pr_err("IBLOCK: Unable to create bioset()\n");
-               return ERR_PTR(-ENOMEM);
+               pr_err("IBLOCK: Unable to create bioset\n");
+               goto out;
        }
-       pr_debug("IBLOCK: Created bio_set()\n");
-       /*
-        * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-        * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-        */
+
        pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);
 
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
-               goto failed;
+               goto out_free_bioset;
        }
-       /*
-        * Setup the local scope queue_limits from struct request_queue->limits
-        * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-        */
-       q = bdev_get_queue(bd);
-       limits = &dev_limits.limits;
-       limits->logical_block_size = bdev_logical_block_size(bd);
-       limits->max_hw_sectors = UINT_MAX;
-       limits->max_sectors = UINT_MAX;
-       dev_limits.hw_queue_depth = q->nr_requests;
-       dev_limits.queue_depth = q->nr_requests;
-
        ib_dev->ibd_bd = bd;
 
-       dev = transport_add_device_to_core_hba(hba,
-                       &iblock_template, se_dev, dev_flags, ib_dev,
-                       &dev_limits, "IBLOCK", IBLOCK_VERSION);
-       if (!dev)
-               goto failed;
+       q = bdev_get_queue(bd);
+
+       dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
         * Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
         * in ATA and we need to set TPE=1
         */
        if (blk_queue_discard(q)) {
-               dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+               dev->dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;
+
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
-               dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-               dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+               dev->dev_attrib.max_unmap_block_desc_count = 1;
+               dev->dev_attrib.unmap_granularity =
                                q->limits.discard_granularity >> 9;
-               dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+               dev->dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;
 
                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }
+       /*
+        * Enable WRITE SAME emulation for IBLOCK, and cap max_write_same_len
+        * at 0xFFFF since the smaller WRITE_SAME(10) CDB only has a two-byte
+        * block count.
+        */
+       dev->dev_attrib.max_write_same_len = 0xFFFF;
 
        if (blk_queue_nonrot(q))
-               dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
-       return dev;
+               dev->dev_attrib.is_nonrot = 1;
+       return 0;
 
-failed:
-       if (ib_dev->ibd_bio_set) {
-               bioset_free(ib_dev->ibd_bio_set);
-               ib_dev->ibd_bio_set = NULL;
-       }
-       ib_dev->ibd_bd = NULL;
-       return ERR_PTR(ret);
+out_free_bioset:
+       bioset_free(ib_dev->ibd_bio_set);
+       ib_dev->ibd_bio_set = NULL;
+out:
+       return ret;
 }
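The rewritten configure path unwinds with the usual goto-cleanup idiom: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition. A self-contained sketch of the pattern, with malloc() standing in for bioset_create() and blkdev_get_by_path() (all names here are invented):

#include <stdlib.h>

static int configure(void)
{
        char *bioset, *bdev;
        int ret = -1;

        bioset = malloc(32);             /* stands in for bioset_create() */
        if (!bioset)
                goto out;

        bdev = malloc(32);               /* stands in for blkdev_get_by_path() */
        if (!bdev)
                goto out_free_bioset;

        /* success path: in the driver, ownership moves into ib_dev; the
         * sketch frees here only so it does not leak */
        free(bdev);
        free(bioset);
        return 0;

out_free_bioset:
        free(bioset);                    /* undo in reverse order */
out:
        return ret;
}

int main(void) { return configure(); }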
 
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-       struct iblock_dev *ib_dev = p;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 
        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);
 
-       if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+       if (block_size == dev->dev_attrib.block_size)
                return blocks_long;
 
        switch (block_size) {
        case 4096:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 2048:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 1024:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 512:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
        return blocks_long;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+       struct iblock_req *ibr = cmd->priv;
+       u8 status;
+
+       if (!atomic_dec_and_test(&ibr->pending))
+               return;
+
+       if (atomic_read(&ibr->ib_bio_err_cnt))
+               status = SAM_STAT_CHECK_CONDITION;
+       else
+               status = SAM_STAT_GOOD;
+
+       target_complete_cmd(cmd, status);
+       kfree(ibr);
+}
+
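iblock_complete_cmd() implements a fan-out completion: ibr->pending holds one count per outstanding bio, and only the completion that drops it to zero reports status, with any error folded into ib_bio_err_cnt. A rough userspace analog using C11 atomics (struct and function names are made up):

#include <stdatomic.h>
#include <stdio.h>

struct req {
        atomic_int pending;   /* one count per in-flight bio */
        atomic_int err_cnt;
};

static void bio_done(struct req *r, int err)
{
        if (err)
                atomic_fetch_add(&r->err_cnt, 1);
        /* only the completion that drops pending to zero reports status */
        if (atomic_fetch_sub(&r->pending, 1) != 1)
                return;
        printf("complete: %s\n",
               atomic_load(&r->err_cnt) ? "CHECK_CONDITION" : "GOOD");
}

int main(void)
{
        struct req r = { 3, 0 };
        bio_done(&r, 0);
        bio_done(&r, -5);
        bio_done(&r, 0);   /* last completion in reports CHECK_CONDITION */
        return 0;
}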
+static void iblock_bio_done(struct bio *bio, int err)
+{
+       struct se_cmd *cmd = bio->bi_private;
+       struct iblock_req *ibr = cmd->priv;
+
+       /*
+        * Set -EIO if !BIO_UPTODATE and the passed err is still 0
+        */
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+               err = -EIO;
+
+       if (err != 0) {
+               pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+                       " err: %d\n", bio, err);
+               /*
+                * Bump the ib_bio_err_cnt and release bio.
+                */
+               atomic_inc(&ibr->ib_bio_err_cnt);
+               smp_mb__after_atomic_inc();
+       }
+
+       bio_put(bio);
+
+       iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+       struct bio *bio;
+
+       /*
+        * Only allocate as many vector entries as the bio code allows us to,
+        * we'll loop later on until we have handled the whole request.
+        */
+       if (sg_num > BIO_MAX_PAGES)
+               sg_num = BIO_MAX_PAGES;
+
+       bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+       if (!bio) {
+               pr_err("Unable to allocate memory for bio\n");
+               return NULL;
+       }
+
+       bio->bi_bdev = ib_dev->ibd_bd;
+       bio->bi_private = cmd;
+       bio->bi_end_io = &iblock_bio_done;
+       bio->bi_sector = lba;
+
+       return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+       struct blk_plug plug;
+       struct bio *bio;
+
+       blk_start_plug(&plug);
+       while ((bio = bio_list_pop(list)))
+               submit_bio(rw, bio);
+       blk_finish_plug(&plug);
+}
+
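iblock_submit_bios() drains the queued bios under a block-layer plug so adjacent requests can be merged before dispatch. A loose analog of the idea (no real plugging, just build-then-flush; the list type is invented):

#include <stdio.h>

struct bio_node { int sector; struct bio_node *next; };

static void submit_all(struct bio_node *head)
{
        /* while the "plug" is held, nothing reaches the driver ... */
        for (; head; head = head->next)
                printf("submit sector %d\n", head->sector);
        /* ... and blk_finish_plug() would flush the whole batch at once */
}

int main(void)
{
        struct bio_node c = { 24, NULL }, b = { 16, &c }, a = { 8, &b };
        submit_all(&a);
        return 0;
}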
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
        struct se_cmd *cmd = bio->bi_private;
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);
 
        if (cmd) {
-               if (err) {
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               if (err)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-               } else {
+               else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
-               }
        }
 
        bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
  * always flush the whole cache.
  */
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-       struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;
 
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
        return 0;
 }
 
-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct iblock_dev *ibd = dev->dev_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
        int size;
        u32 range;
-       int ret = 0;
-       int dl, bd_dl;
+       sense_reason_t ret = 0;
+       int dl, bd_dl, err;
 
        if (cmd->data_length < 8) {
                pr_warn("UNMAP parameter list length %u too small\n",
                        cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        dl = get_unaligned_be16(&buf[0]);
        bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
        else
                size = bd_dl;
 
-       if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+       if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto err;
        }
 
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
                pr_debug("UNMAP: Using lba: %llu and range: %u\n",
                                 (unsigned long long)lba, range);
 
-               if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
+               if (range > dev->dev_attrib.max_unmap_lba_count) {
+                       ret = TCM_INVALID_PARAMETER_LIST;
                        goto err;
                }
 
                if (lba + range > dev->transport->get_blocks(dev) + 1) {
-                       cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
-                       ret = -EINVAL;
+                       ret = TCM_ADDRESS_OUT_OF_RANGE;
                        goto err;
                }
 
-               ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+               err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
                                           GFP_KERNEL, 0);
-               if (ret < 0) {
+               if (err < 0) {
                        pr_err("blkdev_issue_discard() failed: %d\n",
-                                       ret);
+                                       err);
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto err;
                }
 
@@ -397,23 +456,86 @@ err:
        return ret;
 }
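The UNMAP path above walks 16-byte block descriptors, each a big-endian 8-byte LBA plus a 4-byte block count, after the 8-byte parameter list header. A hedged, self-contained sketch of that parsing, which skips the dl/bd_dl length validation the driver performs:

#include <stdio.h>
#include <stdint.h>

static uint64_t get_be64(const unsigned char *p)
{
        uint64_t v = 0;
        int i;
        for (i = 0; i < 8; i++)
                v = (v << 8) | p[i];
        return v;
}

static uint32_t get_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

/* each descriptor: 8-byte LBA, 4-byte block count, 4 reserved bytes */
static void walk_unmap(const unsigned char *buf, int size)
{
        const unsigned char *ptr = buf + 8;   /* skip parameter list header */
        size -= 8;
        while (size >= 16) {
                printf("lba=%llu range=%u\n",
                       (unsigned long long)get_be64(ptr), get_be32(ptr + 8));
                ptr += 16;
                size -= 16;
        }
}

int main(void)
{
        unsigned char buf[24] = { 0 };
        buf[15] = 0x10;   /* descriptor LBA = 16 */
        buf[19] = 0x08;   /* descriptor range = 8 blocks */
        walk_unmap(buf, sizeof(buf));
        return 0;
}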
 
-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-       struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
-       int ret;
-
-       ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
-                                  spc_get_write_same_sectors(cmd), GFP_KERNEL,
-                                  0);
-       if (ret < 0) {
-               pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
-               return ret;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+       int rc;
+
+       rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+                       spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+       if (rc < 0) {
+               pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
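Note the return convention used throughout this series: handlers return a sense_reason_t whose value is the SCSI sense to report, instead of stashing it in cmd->scsi_sense_reason and returning -EINVAL. A toy illustration (sense_t and its values are stand-ins, not the target core's definitions):

#include <stdio.h>

typedef enum {
        TCM_NO_SENSE = 0,
        TCM_INVALID_PARAMETER_LIST,
} sense_t;   /* illustrative stand-in for sense_reason_t */

static sense_t do_unmap(unsigned int param_len)
{
        /* the return value *is* the sense to report; no out-parameter,
         * no separate scsi_sense_reason assignment */
        if (param_len < 8)
                return TCM_INVALID_PARAMETER_LIST;
        return TCM_NO_SENSE;
}

int main(void)
{
        printf("%d %d\n", do_unmap(4), do_unmap(16));   /* prints: 1 0 */
        return 0;
}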
 
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+       struct iblock_req *ibr;
+       struct scatterlist *sg;
+       struct bio *bio;
+       struct bio_list list;
+       sector_t block_lba = cmd->t_task_lba;
+       sector_t sectors = spc_get_write_same_sectors(cmd);
+
+       sg = &cmd->t_data_sg[0];
+
+       if (cmd->t_data_nents > 1 ||
+           sg->length != cmd->se_dev->dev_attrib.block_size) {
+               pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+                       " block_size: %u\n", cmd->t_data_nents, sg->length,
+                       cmd->se_dev->dev_attrib.block_size);
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+       ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+       if (!ibr)
+               goto fail;
+       cmd->priv = ibr;
+
+       bio = iblock_get_bio(cmd, block_lba, 1);
+       if (!bio)
+               goto fail_free_ibr;
+
+       bio_list_init(&list);
+       bio_list_add(&list, bio);
+
+       atomic_set(&ibr->pending, 1);
+
+       while (sectors) {
+               while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+                               != sg->length) {
+
+                       bio = iblock_get_bio(cmd, block_lba, 1);
+                       if (!bio)
+                               goto fail_put_bios;
+
+                       atomic_inc(&ibr->pending);
+                       bio_list_add(&list, bio);
+               }
+
+               /* Always in 512 byte units for Linux/Block */
+               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+               sectors -= 1;
+       }
+
+       iblock_submit_bios(&list, WRITE);
+       return 0;
+
+fail_put_bios:
+       while ((bio = bio_list_pop(&list)))
+               bio_put(bio);
+fail_free_ibr:
+       kfree(ibr);
+fail:
+       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
 enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 };
@@ -425,11 +547,10 @@ static match_table_t tokens = {
        {Opt_err, NULL}
 };
 
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-                                              struct se_subsystem_dev *se_dev,
-                                              const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+               const char *page, ssize_t count)
 {
-       struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
@@ -491,43 +612,26 @@ out:
        return (!ret) ? count : ret;
 }
 
-static ssize_t iblock_check_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-       struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-       if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-               pr_err("Missing udev_path= parameters for IBLOCK\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       char *b)
-{
-       struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-       struct block_device *bd = ibd->ibd_bd;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;
 
        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
-       if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+       if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
-                               ibd->ibd_udev_path);
-       bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);
+                               ib_dev->ibd_udev_path);
+       bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
 
        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-                       "" : (bd->bd_holder == ibd) ?
+                       "" : (bd->bd_holder == ib_dev) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
        return bl;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
-       struct iblock_req *ibr = cmd->priv;
-       u8 status;
-
-       if (!atomic_dec_and_test(&ibr->pending))
-               return;
-
-       if (atomic_read(&ibr->ib_bio_err_cnt))
-               status = SAM_STAT_CHECK_CONDITION;
-       else
-               status = SAM_STAT_GOOD;
-
-       target_complete_cmd(cmd, status);
-       kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
-       struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-       struct bio *bio;
-
-       /*
-        * Only allocate as many vector entries as the bio code allows us to,
-        * we'll loop later on until we have handled the whole request.
-        */
-       if (sg_num > BIO_MAX_PAGES)
-               sg_num = BIO_MAX_PAGES;
-
-       bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-       if (!bio) {
-               pr_err("Unable to allocate memory for bio\n");
-               return NULL;
-       }
-
-       bio->bi_bdev = ib_dev->ibd_bd;
-       bio->bi_private = cmd;
-       bio->bi_end_io = &iblock_bio_done;
-       bio->bi_sector = lba;
-       return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
-       struct blk_plug plug;
-       struct bio *bio;
-
-       blk_start_plug(&plug);
-       while ((bio = bio_list_pop(list)))
-               submit_bio(rw, bio);
-       blk_finish_plug(&plug);
-}
-
-static int iblock_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd)
 {
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
@@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
-               if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-                   (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+               if (dev->dev_attrib.emulate_write_cache == 0 ||
+                   (dev->dev_attrib.emulate_fua_write > 0 &&
                     (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
@@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
         * Convert the blocksize advertised to the initiator to the 512 byte
         * units unconditionally used by the Linux block layer.
         */
-       if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+       if (dev->dev_attrib.block_size == 4096)
                block_lba = (cmd->t_task_lba << 3);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+       else if (dev->dev_attrib.block_size == 2048)
                block_lba = (cmd->t_task_lba << 2);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+       else if (dev->dev_attrib.block_size == 1024)
                block_lba = (cmd->t_task_lba << 1);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+       else if (dev->dev_attrib.block_size == 512)
                block_lba = cmd->t_task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-                               " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -ENOSYS;
+                               " %u\n", dev->dev_attrib.block_size);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
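The block-size conversion above works because every supported size is a power of two, so scaling the LBA to the block layer's 512-byte units reduces to a shift. A small sketch of the same mapping (the function name is made up):

#include <stdint.h>
#include <stdio.h>

/* convert an LBA in the exported block size into 512-byte sector units */
static int lba_to_512(uint64_t lba, uint32_t block_size, uint64_t *out)
{
        switch (block_size) {
        case 4096: *out = lba << 3; return 0;   /* 4096 / 512 == 8 */
        case 2048: *out = lba << 2; return 0;
        case 1024: *out = lba << 1; return 0;
        case  512: *out = lba;      return 0;
        default:   return -1;                   /* unsupported size */
        }
}

int main(void)
{
        uint64_t sector;
        if (lba_to_512(100, 4096, &sector) == 0)
                printf("%llu\n", (unsigned long long)sector);   /* 800 */
        return 0;
}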
@@ -697,83 +747,48 @@ fail_put_bios:
                bio_put(bio);
 fail_free_ibr:
        kfree(ibr);
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
-       return -ENOMEM;
-}
-
-static u32 iblock_get_device_rev(struct se_device *dev)
-{
-       return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 iblock_get_device_type(struct se_device *dev)
-{
-       return TYPE_DISK;
+       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 static sector_t iblock_get_blocks(struct se_device *dev)
 {
-       struct iblock_dev *ibd = dev->dev_ptr;
-       struct block_device *bd = ibd->ibd_bd;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);
 
        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
-{
-       struct se_cmd *cmd = bio->bi_private;
-       struct iblock_req *ibr = cmd->priv;
-
-       /*
-        * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-        */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-               err = -EIO;
-
-       if (err != 0) {
-               pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-                       " err: %d\n", bio, err);
-               /*
-                * Bump the ib_bio_err_cnt and release bio.
-                */
-               atomic_inc(&ibr->ib_bio_err_cnt);
-               smp_mb__after_atomic_inc();
-       }
-
-       bio_put(bio);
-
-       iblock_complete_cmd(cmd);
-}
-
-static struct spc_ops iblock_spc_ops = {
+static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
+       .execute_write_same_unmap = iblock_execute_write_same_unmap,
        .execute_unmap          = iblock_execute_unmap,
 };
 
-static int iblock_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
 {
-       return sbc_parse_cdb(cmd, &iblock_spc_ops);
+       return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
 static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
+       .inquiry_prod           = "IBLOCK",
+       .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
-       .allocate_virtdevice    = iblock_allocate_virtdevice,
-       .create_virtdevice      = iblock_create_virtdevice,
+       .alloc_device           = iblock_alloc_device,
+       .configure_device       = iblock_configure_device,
        .free_device            = iblock_free_device,
        .parse_cdb              = iblock_parse_cdb,
-       .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
-       .get_device_rev         = iblock_get_device_rev,
-       .get_device_type        = iblock_get_device_type,
+       .get_device_type        = sbc_get_device_type,
        .get_blocks             = iblock_get_blocks,
 };
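The se_subsystem_api template is the usual ops-table dispatch: the core only ever calls a backend through these function pointers, so swapping iblock for another backend changes no core code. A compact sketch of the pattern with invented names:

#include <stdio.h>

struct cmd { int is_sync; };

/* backend ops table: the core dispatches through function pointers,
 * and backends fill in only the operations they support */
struct backend_ops {
        int (*execute_rw)(struct cmd *);
        int (*execute_sync_cache)(struct cmd *);
};

static int my_rw(struct cmd *c)   { (void)c; return puts("rw") < 0; }
static int my_sync(struct cmd *c) { (void)c; return puts("sync") < 0; }

static const struct backend_ops my_ops = {
        .execute_rw         = my_rw,
        .execute_sync_cache = my_sync,
};

static int dispatch(const struct backend_ops *ops, struct cmd *c)
{
        return c->is_sync ? ops->execute_sync_cache(c) : ops->execute_rw(c);
}

int main(void)
{
        struct cmd c = { 0 };
        return dispatch(&my_ops, &c);
}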
 
index 533627a..01c2afd 100644 (file)
@@ -14,6 +14,7 @@ struct iblock_req {
 #define IBDF_HAS_UDEV_PATH             0x01
 
 struct iblock_dev {
+       struct se_device dev;
        unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
        u32     ibd_flags;
        struct bio_set  *ibd_bio_set;
index 0fd4282..93e9c1f 100644 (file)
@@ -19,18 +19,12 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
                struct se_lun *);
 void   core_dev_unexport(struct se_device *, struct se_portal_group *,
                struct se_lun *);
-int    target_report_luns(struct se_cmd *);
-void   se_release_device_for_hba(struct se_device *);
-void   se_release_vpd_for_dev(struct se_device *);
-int    se_free_virtual_device(struct se_device *, struct se_hba *);
-int    se_dev_check_online(struct se_device *);
-int    se_dev_check_shutdown(struct se_device *);
-void   se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
 int    se_dev_set_task_timeout(struct se_device *, u32);
 int    se_dev_set_max_unmap_lba_count(struct se_device *, u32);
 int    se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
 int    se_dev_set_unmap_granularity(struct se_device *, u32);
 int    se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int    se_dev_set_max_write_same_len(struct se_device *, u32);
 int    se_dev_set_emulate_dpo(struct se_device *, int);
 int    se_dev_set_emulate_fua_write(struct se_device *, int);
 int    se_dev_set_emulate_fua_read(struct se_device *, int);
@@ -60,6 +54,9 @@ void  core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
                struct se_lun_acl *lacl);
 int    core_dev_setup_virtual_lun0(void);
 void   core_dev_release_virtual_lun0(void);
+struct se_device *target_alloc_device(struct se_hba *hba, const char *name);
+int    target_configure_device(struct se_device *dev);
+void   target_free_device(struct se_device *);
 
 /* target_core_hba.c */
 struct se_hba *core_alloc_hba(const char *, u32, u32);
@@ -105,10 +102,11 @@ int       transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
 bool   target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 int    transport_clear_lun_from_sessions(struct se_lun *);
 void   transport_send_task_abort(struct se_cmd *);
-int    target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
+void   target_qf_do_work(struct work_struct *work);
 
 /* target_core_stat.c */
-void   target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void   target_stat_setup_dev_default_groups(struct se_device *);
 void   target_stat_setup_port_default_groups(struct se_lun *);
 void   target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
 
index 8c323a9..e35dbf8 100644 (file)
@@ -4,8 +4,7 @@
  * This file contains SPC-3 compliant persistent reservations and
  * legacy SPC-2 reservations with compatible reservation handling (CRH=1)
  *
- * Copyright (c) 2009, 2010 Rising Tide Systems
- * Copyright (c) 2009, 2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -68,49 +67,33 @@ int core_pr_dump_initiator_port(
 static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
                        struct t10_pr_registration *, int);
 
-static int core_scsi2_reservation_seq_non_holder(
-       struct se_cmd *cmd,
-       unsigned char *cdb,
-       u32 pr_reg_type)
+static sense_reason_t
+target_scsi2_reservation_check(struct se_cmd *cmd)
 {
-       switch (cdb[0]) {
+       struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
+
+       switch (cmd->t_task_cdb[0]) {
        case INQUIRY:
        case RELEASE:
        case RELEASE_10:
                return 0;
        default:
-               return 1;
+               break;
        }
 
-       return 1;
-}
-
-static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct se_session *sess = cmd->se_sess;
-       int ret;
-
-       if (!sess)
+       if (!dev->dev_reserved_node_acl || !sess)
                return 0;
 
-       spin_lock(&dev->dev_reservation_lock);
-       if (!dev->dev_reserved_node_acl || !sess) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return 0;
-       }
-       if (dev->dev_reserved_node_acl != sess->se_node_acl) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return -EINVAL;
-       }
-       if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return 0;
+       if (dev->dev_reserved_node_acl != sess->se_node_acl)
+               return TCM_RESERVATION_CONFLICT;
+
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
+               if (dev->dev_res_bin_isid != sess->sess_bin_isid)
+                       return TCM_RESERVATION_CONFLICT;
        }
-       ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
-       spin_unlock(&dev->dev_reservation_lock);
 
-       return ret;
+       return 0;
 }
 
 static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
@@ -120,15 +103,11 @@ static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
 static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
 {
        struct se_session *se_sess = cmd->se_sess;
-       struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+       struct se_device *dev = cmd->se_dev;
        struct t10_pr_registration *pr_reg;
-       struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
-       int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        int conflict = 0;
 
-       if (!crh)
-               return -EINVAL;
-
        pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
                        se_sess);
        if (pr_reg) {
@@ -186,32 +165,28 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return -EBUSY;
        }
 
        return 0;
 }
 
-int target_scsi2_reservation_release(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_release(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_session *sess = cmd->se_sess;
        struct se_portal_group *tpg;
-       int ret = 0, rc;
+       int rc;
 
        if (!sess || !sess->se_tpg)
                goto out;
        rc = target_check_scsi2_reservation_conflict(cmd);
        if (rc == 1)
                goto out;
-       else if (rc < 0) {
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
-               goto out;
-       }
+       if (rc < 0)
+               return TCM_RESERVATION_CONFLICT;
 
-       ret = 0;
        spin_lock(&dev->dev_reservation_lock);
        if (!dev->dev_reserved_node_acl || !sess)
                goto out_unlock;
@@ -223,10 +198,10 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
                goto out_unlock;
 
        dev->dev_reserved_node_acl = NULL;
-       dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
-       if (dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID) {
+       dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS_WITH_ISID) {
                dev->dev_res_bin_isid = 0;
-               dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
+               dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS_WITH_ISID;
        }
        tpg = sess->se_tpg;
        pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
@@ -237,25 +212,24 @@ int target_scsi2_reservation_release(struct se_cmd *cmd)
 out_unlock:
        spin_unlock(&dev->dev_reservation_lock);
 out:
-       if (!ret)
-               target_complete_cmd(cmd, GOOD);
-       return ret;
+       target_complete_cmd(cmd, GOOD);
+       return 0;
 }
 
-int target_scsi2_reservation_reserve(struct se_cmd *cmd)
+sense_reason_t
+target_scsi2_reservation_reserve(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_session *sess = cmd->se_sess;
        struct se_portal_group *tpg;
-       int ret = 0, rc;
+       sense_reason_t ret = 0;
+       int rc;
 
        if ((cmd->t_task_cdb[1] & 0x01) &&
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obselete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               ret = -EINVAL;
-               goto out;
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
        /*
         * This is currently the case for target_core_mod passthrough struct se_cmd
@@ -266,13 +240,10 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
        rc = target_check_scsi2_reservation_conflict(cmd);
        if (rc == 1)
                goto out;
-       else if (rc < 0) {
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
-               goto out;
-       }
 
-       ret = 0;
+       if (rc < 0)
+               return TCM_RESERVATION_CONFLICT;
+
        tpg = sess->se_tpg;
        spin_lock(&dev->dev_reservation_lock);
        if (dev->dev_reserved_node_acl &&
@@ -286,16 +257,15 @@ int target_scsi2_reservation_reserve(struct se_cmd *cmd)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
                goto out_unlock;
        }
 
        dev->dev_reserved_node_acl = sess->se_node_acl;
-       dev->dev_flags |= DF_SPC2_RESERVATIONS;
+       dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS;
        if (sess->sess_bin_isid != 0) {
                dev->dev_res_bin_isid = sess->sess_bin_isid;
-               dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
+               dev->dev_reservation_flags |= DRF_SPC2_RESERVATIONS_WITH_ISID;
        }
        pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
                " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -319,9 +289,9 @@ out:
  */
 static int core_scsi3_pr_seq_non_holder(
        struct se_cmd *cmd,
-       unsigned char *cdb,
        u32 pr_reg_type)
 {
+       unsigned char *cdb = cmd->t_task_cdb;
        struct se_dev_entry *se_deve;
        struct se_session *se_sess = cmd->se_sess;
        int other_cdb = 0, ignore_reg;
@@ -330,17 +300,11 @@ static int core_scsi3_pr_seq_non_holder(
        int we = 0; /* Write Exclusive */
        int legacy = 0; /* Act like a legacy device and return
                         * RESERVATION CONFLICT on some CDBs */
-       /*
-        * A legacy SPC-2 reservation is being held.
-        */
-       if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS)
-               return core_scsi2_reservation_seq_non_holder(cmd,
-                                       cdb, pr_reg_type);
 
        se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        /*
         * Determine if the registration should be ignored due to
-        * non-matching ISIDs in core_scsi3_pr_reservation_check().
+        * non-matching ISIDs in target_scsi3_pr_reservation_check().
         */
        ignore_reg = (pr_reg_type & 0x80000000);
        if (ignore_reg)
@@ -563,10 +527,41 @@ static int core_scsi3_pr_seq_non_holder(
        return 1; /* Conflict by default */
 }
 
+static sense_reason_t
+target_scsi3_pr_reservation_check(struct se_cmd *cmd)
+{
+       struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
+       u32 pr_reg_type;
+
+       if (!dev->dev_pr_res_holder)
+               return 0;
+
+       pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
+       cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
+       if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl)
+               goto check_nonholder;
+
+       if (dev->dev_pr_res_holder->isid_present_at_reg) {
+               if (dev->dev_pr_res_holder->pr_reg_bin_isid !=
+                   sess->sess_bin_isid) {
+                       pr_reg_type |= 0x80000000;
+                       goto check_nonholder;
+               }
+       }
+
+       return 0;
+
+check_nonholder:
+       if (core_scsi3_pr_seq_non_holder(cmd, pr_reg_type))
+               return TCM_RESERVATION_CONFLICT;
+       return 0;
+}
+
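target_scsi3_pr_reservation_check() keeps signalling an ISID mismatch by setting bit 31 of pr_reg_type, which core_scsi3_pr_seq_non_holder() strips off as ignore_reg. A tiny sketch of that flag-in-the-type-word encoding (the 0x06 value below is made up):

#include <stdint.h>
#include <stdio.h>

#define ISID_MISMATCH 0x80000000u   /* high bit of pr_reg_type, as in the patch */

/* fold the "registration should be ignored" hint into the type word */
static uint32_t mark_isid_mismatch(uint32_t pr_reg_type)
{
        return pr_reg_type | ISID_MISMATCH;
}

int main(void)
{
        uint32_t t = mark_isid_mismatch(0x06);   /* hypothetical type value */
        printf("type=0x%x ignore=%d\n", t & ~ISID_MISMATCH, !!(t & ISID_MISMATCH));
        return 0;
}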
 static u32 core_scsi3_pr_generation(struct se_device *dev)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        u32 prg;
+
        /*
         * PRGeneration field shall contain the value of a 32-bit wrapping
         * counter maintained by the device server.
@@ -577,56 +572,12 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
         * See spc4r17 section 6.3.12 READ_KEYS service action
         */
        spin_lock(&dev->dev_reservation_lock);
-       prg = su_dev->t10_pr.pr_generation++;
+       prg = dev->t10_pr.pr_generation++;
        spin_unlock(&dev->dev_reservation_lock);
 
        return prg;
 }
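core_scsi3_pr_generation() is a 32-bit wrapping counter whose post-increment is serialized by dev_reservation_lock. A userspace analog, with a relaxed atomic standing in for the spinlock (assumed adequate here only because nothing else is protected alongside the counter):

#include <stdatomic.h>
#include <stdio.h>

/* wrapping PRGeneration analog: post-increment, 32-bit overflow is fine */
static atomic_uint pr_generation;

static unsigned int next_generation(void)
{
        /* an atomic fetch-add stands in for the spinlocked ++ in the kernel */
        return atomic_fetch_add_explicit(&pr_generation, 1,
                                         memory_order_relaxed);
}

int main(void)
{
        printf("%u %u\n", next_generation(), next_generation());   /* 0 1 */
        return 0;
}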
 
-static int core_scsi3_pr_reservation_check(
-       struct se_cmd *cmd,
-       u32 *pr_reg_type)
-{
-       struct se_device *dev = cmd->se_dev;
-       struct se_session *sess = cmd->se_sess;
-       int ret;
-
-       if (!sess)
-               return 0;
-       /*
-        * A legacy SPC-2 reservation is being held.
-        */
-       if (dev->dev_flags & DF_SPC2_RESERVATIONS)
-               return core_scsi2_reservation_check(cmd, pr_reg_type);
-
-       spin_lock(&dev->dev_reservation_lock);
-       if (!dev->dev_pr_res_holder) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return 0;
-       }
-       *pr_reg_type = dev->dev_pr_res_holder->pr_res_type;
-       cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
-       if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return -EINVAL;
-       }
-       if (!dev->dev_pr_res_holder->isid_present_at_reg) {
-               spin_unlock(&dev->dev_reservation_lock);
-               return 0;
-       }
-       ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
-              sess->sess_bin_isid) ? 0 : -EINVAL;
-       /*
-        * Use bit in *pr_reg_type to notify ISID mismatch in
-        * core_scsi3_pr_seq_non_holder().
-        */
-       if (ret != 0)
-               *pr_reg_type |= 0x80000000;
-       spin_unlock(&dev->dev_reservation_lock);
-
-       return ret;
-}
-
 static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
        struct se_device *dev,
        struct se_node_acl *nacl,
@@ -636,7 +587,6 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
        int all_tg_pt,
        int aptpl)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct t10_pr_registration *pr_reg;
 
        pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
@@ -645,7 +595,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
                return NULL;
        }
 
-       pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
+       pr_reg->pr_aptpl_buf = kzalloc(dev->t10_pr.pr_aptpl_buf_len,
                                        GFP_ATOMIC);
        if (!pr_reg->pr_aptpl_buf) {
                pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
@@ -929,7 +879,7 @@ static int __core_scsi3_check_aptpl_registration(
        struct se_dev_entry *deve)
 {
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
        unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
        u16 tpgt;
@@ -996,11 +946,10 @@ int core_scsi3_check_aptpl_registration(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct se_node_acl *nacl = lun_acl->se_lun_nacl;
        struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
 
-       if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
 
        return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1051,10 +1000,9 @@ static void __core_scsi3_add_registration(
        int register_type,
        int register_move)
 {
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
        struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
 
        /*
         * Increment PRgeneration counter for struct se_device upon a successful
@@ -1066,7 +1014,7 @@ static void __core_scsi3_add_registration(
         * for the REGISTER.
         */
        pr_reg->pr_res_generation = (register_move) ?
-                       su_dev->t10_pr.pr_generation++ :
+                       dev->t10_pr.pr_generation++ :
                        core_scsi3_pr_generation(dev);
 
        spin_lock(&pr_tmpl->registration_lock);
@@ -1135,7 +1083,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
        struct se_node_acl *nacl,
        unsigned char *isid)
 {
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
        struct se_portal_group *tpg;
 
@@ -1160,7 +1108,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
                         * for fabric modules (iSCSI) requiring them.
                         */
                        if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
-                               if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
+                               if (dev->dev_attrib.enforce_pr_isids)
                                        continue;
                        }
                        atomic_inc(&pr_reg->pr_res_holders);
@@ -1274,7 +1222,7 @@ static void __core_scsi3_free_registration(
 {
        struct target_core_fabric_ops *tfo =
                        pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        char i_buf[PR_REG_ISID_ID_LEN];
        int prf_isid;
 
@@ -1335,7 +1283,7 @@ void core_scsi3_free_pr_reg_from_nacl(
        struct se_device *dev,
        struct se_node_acl *nacl)
 {
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
        /*
         * If the passed se_node_acl matches the reservation holder,
@@ -1365,7 +1313,7 @@ void core_scsi3_free_pr_reg_from_nacl(
 void core_scsi3_free_all_registrations(
        struct se_device *dev)
 {
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
 
        spin_lock(&dev->dev_reservation_lock);
@@ -1479,7 +1427,8 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
        smp_mb__after_atomic_dec();
 }
 
-static int core_scsi3_decode_spec_i_port(
+static sense_reason_t
+core_scsi3_decode_spec_i_port(
        struct se_cmd *cmd,
        struct se_portal_group *tpg,
        unsigned char *l_isid,
@@ -1501,8 +1450,9 @@ static int core_scsi3_decode_spec_i_port(
        unsigned char *buf;
        unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
        char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+       sense_reason_t ret;
        u32 tpdl, tid_len = 0;
-       int ret, dest_local_nexus, prf_isid;
+       int dest_local_nexus, prf_isid;
        u32 dest_rtpi = 0;
 
        memset(dest_iport, 0, 64);
@@ -1517,8 +1467,7 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1530,8 +1479,7 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -ENOMEM;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1545,12 +1493,16 @@ static int core_scsi3_decode_spec_i_port(
        if (cmd->data_length < 28) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf) {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto out;
+       }
+
        /*
         * For a PERSISTENT RESERVE OUT specify initiator ports payload,
         * first extract TransportID Parameter Data Length, and make sure
@@ -1565,9 +1517,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
-               goto out;
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_unmap;
        }
        /*
         * Start processing the received transport IDs using the
@@ -1610,16 +1561,13 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_inc();
                        spin_unlock(&dev->se_port_lock);
 
-                       ret = core_scsi3_tpg_depend_item(tmp_tpg);
-                       if (ret != 0) {
+                       if (core_scsi3_tpg_depend_item(tmp_tpg)) {
                                pr_err(" core_scsi3_tpg_depend_item()"
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               cmd->scsi_sense_reason =
-                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                               ret = -EINVAL;
-                               goto out;
+                               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               goto out_unmap;
                        }
                        /*
                         * Locate the destination initiator ACL to be registered
@@ -1641,17 +1589,14 @@ static int core_scsi3_decode_spec_i_port(
                                continue;
                        }
 
-                       ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
-                       if (ret != 0) {
+                       if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
                                pr_err("configfs_depend_item() failed"
                                        " for dest_node_acl->acl_group\n");
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               cmd->scsi_sense_reason =
-                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                               ret = -EINVAL;
-                               goto out;
+                               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               goto out_unmap;
                        }
 
                        dest_tpg = tmp_tpg;
@@ -1668,9 +1613,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
-                       goto out;
+                       ret = TCM_INVALID_PARAMETER_LIST;
+                       goto out_unmap;
                }
 
                pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
@@ -1683,9 +1627,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
-                       goto out;
+                       ret = TCM_INVALID_PARAMETER_LIST;
+                       goto out_unmap;
                }
                /*
                 * Locate the destination struct se_dev_entry pointer for matching
@@ -1702,23 +1645,19 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
-                       goto out;
+                       ret = TCM_INVALID_PARAMETER_LIST;
+                       goto out_unmap;
                }
 
-               ret = core_scsi3_lunacl_depend_item(dest_se_deve);
-               if (ret < 0) {
+               if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                        pr_err("core_scsi3_lunacl_depend_item()"
                                        " failed\n");
                        atomic_dec(&dest_se_deve->pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       ret = -EINVAL;
-                       goto out;
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       goto out_unmap;
                }
 
                pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
@@ -1754,10 +1693,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       ret = -ENOMEM;
-                       goto out;
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       goto out_unmap;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
                tidh_new->dest_tpg = dest_tpg;
@@ -1788,9 +1725,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
-                       goto out;
+                       ret = TCM_INVALID_PARAMETER_LIST;
+                       goto out_unmap;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
                list_add_tail(&tidh_new->dest_list, &tid_dest_list);
@@ -1848,8 +1784,9 @@ static int core_scsi3_decode_spec_i_port(
        }
 
        return 0;
-out:
+out_unmap:
        transport_kunmap_data_sg(cmd);
+out:
        /*
         * For the failure case, release everything from tid_dest_list
         * including *dest_pr_reg and the configfs dependencies..
@@ -1899,7 +1836,6 @@ static int __core_scsi3_update_aptpl_buf(
 {
        struct se_lun *lun;
        struct se_portal_group *tpg;
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        struct t10_pr_registration *pr_reg;
        unsigned char tmp[512], isid_buf[32];
        ssize_t len = 0;
@@ -1917,8 +1853,8 @@ static int __core_scsi3_update_aptpl_buf(
        /*
         * Walk the registration list..
         */
-       spin_lock(&su_dev->t10_pr.registration_lock);
-       list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+       spin_lock(&dev->t10_pr.registration_lock);
+       list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
                        pr_reg_list) {
 
                tmp[0] = '\0';
@@ -1963,7 +1899,7 @@ static int __core_scsi3_update_aptpl_buf(
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        pr_err("Unable to update renaming"
                                " APTPL metadata\n");
-                       spin_unlock(&su_dev->t10_pr.registration_lock);
+                       spin_unlock(&dev->t10_pr.registration_lock);
                        return -EMSGSIZE;
                }
                len += sprintf(buf+len, "%s", tmp);
@@ -1981,13 +1917,13 @@ static int __core_scsi3_update_aptpl_buf(
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
                        pr_err("Unable to update renaming"
                                " APTPL metadata\n");
-                       spin_unlock(&su_dev->t10_pr.registration_lock);
+                       spin_unlock(&dev->t10_pr.registration_lock);
                        return -EMSGSIZE;
                }
                len += sprintf(buf+len, "%s", tmp);
                reg_count++;
        }
-       spin_unlock(&su_dev->t10_pr.registration_lock);
+       spin_unlock(&dev->t10_pr.registration_lock);
 
        if (!reg_count)
                len += sprintf(buf+len, "No Registrations or Reservations");
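__core_scsi3_update_aptpl_buf() appends each registration record only after checking that it still fits, and bails out with -EMSGSIZE otherwise. A minimal sketch of that bounded-append pattern (the record format here is invented):

#include <stdio.h>
#include <string.h>

/* append a record only if it fits; the caller maps failure to -EMSGSIZE */
static int append_record(char *buf, size_t buf_len, size_t *len, const char *rec)
{
        if (*len + strlen(rec) >= buf_len)
                return -1;
        *len += (size_t)sprintf(buf + *len, "%s", rec);
        return 0;
}

int main(void)
{
        char buf[64];
        size_t len = 0;

        if (append_record(buf, sizeof(buf), &len, "initiator=iqn.example\n") == 0)
                fputs(buf, stdout);
        return 0;
}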
@@ -2019,7 +1955,7 @@ static int __core_scsi3_write_aptpl_to_file(
        unsigned char *buf,
        u32 pr_aptpl_buf_len)
 {
-       struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+       struct t10_wwn *wwn = &dev->t10_wwn;
        struct file *file;
        struct iovec iov[1];
        mm_segment_t old_fs;
@@ -2065,14 +2001,15 @@ static int __core_scsi3_write_aptpl_to_file(
        return 0;
 }
 
-static int core_scsi3_update_and_write_aptpl(
-       struct se_device *dev,
-       unsigned char *in_buf,
-       u32 in_pr_aptpl_buf_len)
+static int
+core_scsi3_update_and_write_aptpl(struct se_device *dev, unsigned char *in_buf,
+               u32 in_pr_aptpl_buf_len)
 {
        unsigned char null_buf[64], *buf;
        u32 pr_aptpl_buf_len;
-       int ret, clear_aptpl_metadata = 0;
+       int clear_aptpl_metadata = 0;
+       int ret;
+
        /*
         * Can be called with a NULL pointer from PROUT service action CLEAR
         */
@@ -2094,25 +2031,17 @@ static int core_scsi3_update_and_write_aptpl(
                                clear_aptpl_metadata);
        if (ret != 0)
                return ret;
+
        /*
         * __core_scsi3_write_aptpl_to_file() will call strlen()
         * on the passed buf to determine pr_aptpl_buf_len.
         */
-       ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
-       if (ret != 0)
-               return ret;
-
-       return ret;
+       return __core_scsi3_write_aptpl_to_file(dev, buf, 0);
 }
 
-static int core_scsi3_emulate_pro_register(
-       struct se_cmd *cmd,
-       u64 res_key,
-       u64 sa_res_key,
-       int aptpl,
-       int all_tg_pt,
-       int spec_i_pt,
-       int ignore_key)
+static sense_reason_t
+core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
+               int aptpl, int all_tg_pt, int spec_i_pt, int ignore_key)
 {
        struct se_session *se_sess = cmd->se_sess;
        struct se_device *dev = cmd->se_dev;
@@ -2120,16 +2049,16 @@ static int core_scsi3_emulate_pro_register(
        struct se_lun *se_lun = cmd->se_lun;
        struct se_portal_group *se_tpg;
        struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        /* Used for APTPL metadata w/ UNREGISTER */
        unsigned char *pr_aptpl_buf = NULL;
        unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
-       int pr_holder = 0, ret = 0, type;
+       sense_reason_t ret;
+       int pr_holder = 0, type;
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2148,8 +2077,7 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EINVAL;
+                       return TCM_RESERVATION_CONFLICT;
                }
                /*
                 * Do nothing but return GOOD status.
@@ -2163,15 +2091,13 @@ static int core_scsi3_emulate_pro_register(
                         * Port Endpoint that the PRO was received from on the
                         * Logical Unit of the SCSI device server.
                         */
-                       ret = core_scsi3_alloc_registration(cmd->se_dev,
+                       if (core_scsi3_alloc_registration(cmd->se_dev,
                                        se_sess->se_node_acl, se_deve, isid_ptr,
                                        sa_res_key, all_tg_pt, aptpl,
-                                       ignore_key, 0);
-                       if (ret != 0) {
+                                       ignore_key, 0)) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                               return -EINVAL;
+                               return TCM_INVALID_PARAMETER_LIST;
                        }
                } else {
                        /*
@@ -2205,201 +2131,192 @@ static int core_scsi3_emulate_pro_register(
                pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
                                se_sess->se_node_acl, se_sess);
 
-               ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+               if (core_scsi3_update_and_write_aptpl(cmd->se_dev,
                                &pr_reg->pr_aptpl_buf[0],
-                               pr_tmpl->pr_aptpl_buf_len);
-               if (!ret) {
+                               pr_tmpl->pr_aptpl_buf_len)) {
                        pr_tmpl->pr_aptpl_active = 1;
                        pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
                }
 
-               core_scsi3_put_pr_reg(pr_reg);
-               return ret;
-       } else {
-               /*
-                * Locate the existing *pr_reg via struct se_node_acl pointers
-                */
-               pr_reg = pr_reg_e;
-               type = pr_reg->pr_res_type;
-
-               if (!ignore_key) {
-                       if (res_key != pr_reg->pr_res_key) {
-                               pr_err("SPC-3 PR REGISTER: Received"
-                                       " res_key: 0x%016Lx does not match"
-                                       " existing SA REGISTER res_key:"
-                                       " 0x%016Lx\n", res_key,
-                                       pr_reg->pr_res_key);
-                               core_scsi3_put_pr_reg(pr_reg);
-                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                               return -EINVAL;
-                       }
+               goto out_put_pr_reg;
+       }
+
+       /*
+        * Locate the existing *pr_reg via struct se_node_acl pointers
+        */
+       pr_reg = pr_reg_e;
+       type = pr_reg->pr_res_type;
+
+       if (!ignore_key) {
+               if (res_key != pr_reg->pr_res_key) {
+                       pr_err("SPC-3 PR REGISTER: Received"
+                               " res_key: 0x%016Lx does not match"
+                               " existing SA REGISTER res_key:"
+                               " 0x%016Lx\n", res_key,
+                               pr_reg->pr_res_key);
+                       ret = TCM_RESERVATION_CONFLICT;
+                       goto out_put_pr_reg;
                }
-               if (spec_i_pt) {
-                       pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
-                               " set while sa_res_key=0\n");
-                       core_scsi3_put_pr_reg(pr_reg);
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       return -EINVAL;
+       }
+
+       if (spec_i_pt) {
+               pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
+                       " set while sa_res_key=0\n");
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_put_pr_reg;
+       }
+
+       /*
+        * An existing ALL_TG_PT=1 registration being released
+        * must also set ALL_TG_PT=1 in the incoming PROUT.
+        */
+       if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
+               pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+                       " registration exists, but ALL_TG_PT=1 bit not"
+                       " present in received PROUT\n");
+               ret = TCM_INVALID_CDB_FIELD;
+               goto out_put_pr_reg;
+       }
+
+       /*
+        * Allocate APTPL metadata buffer used for UNREGISTER ops
+        */
+       if (aptpl) {
+               pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
+                                       GFP_KERNEL);
+               if (!pr_aptpl_buf) {
+                       pr_err("Unable to allocate"
+                               " pr_aptpl_buf\n");
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       goto out_put_pr_reg;
                }
-               /*
-                * An existing ALL_TG_PT=1 registration being released
-                * must also set ALL_TG_PT=1 in the incoming PROUT.
-                */
-               if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
-                       pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
-                               " registration exists, but ALL_TG_PT=1 bit not"
-                               " present in received PROUT\n");
-                       core_scsi3_put_pr_reg(pr_reg);
-                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-                       return -EINVAL;
+       }
+
+       /*
+        * sa_res_key=0: Unregister Reservation Key for registered I_T Nexus.
+        * sa_res_key=1: Change Reservation Key for registered I_T Nexus.
+        */
+       if (!sa_res_key) {
+               pr_holder = core_scsi3_check_implict_release(
+                               cmd->se_dev, pr_reg);
+               if (pr_holder < 0) {
+                       kfree(pr_aptpl_buf);
+                       ret = TCM_RESERVATION_CONFLICT;
+                       goto out_put_pr_reg;
                }
+
+               spin_lock(&pr_tmpl->registration_lock);
                /*
-                * Allocate APTPL metadata buffer used for UNREGISTER ops
+                * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
+                * and matching pr_res_key.
                 */
-               if (aptpl) {
-                       pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
-                                               GFP_KERNEL);
-                       if (!pr_aptpl_buf) {
-                               pr_err("Unable to allocate"
-                                       " pr_aptpl_buf\n");
-                               core_scsi3_put_pr_reg(pr_reg);
-                               cmd->scsi_sense_reason =
-                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                               return -EINVAL;
+               if (pr_reg->pr_reg_all_tg_pt) {
+                       list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
+                                       &pr_tmpl->registration_list,
+                                       pr_reg_list) {
+
+                               if (!pr_reg_p->pr_reg_all_tg_pt)
+                                       continue;
+                               if (pr_reg_p->pr_res_key != res_key)
+                                       continue;
+                               if (pr_reg == pr_reg_p)
+                                       continue;
+                               if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
+                                          pr_reg_p->pr_reg_nacl->initiatorname))
+                                       continue;
+
+                               __core_scsi3_free_registration(dev,
+                                               pr_reg_p, NULL, 0);
                        }
                }
+
                /*
-                * sa_res_key=0 Unregister Reservation Key for registered I_T
-                * Nexus sa_res_key=1 Change Reservation Key for registered I_T
-                * Nexus.
+                * Release the calling I_T Nexus registration now.
                 */
-               if (!sa_res_key) {
-                       pr_holder = core_scsi3_check_implict_release(
-                                       cmd->se_dev, pr_reg);
-                       if (pr_holder < 0) {
-                               kfree(pr_aptpl_buf);
-                               core_scsi3_put_pr_reg(pr_reg);
-                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                               return -EINVAL;
-                       }
-
-                       spin_lock(&pr_tmpl->registration_lock);
-                       /*
-                        * Release all ALL_TG_PT=1 for the matching SCSI Initiator Port
-                        * and matching pr_res_key.
-                        */
-                       if (pr_reg->pr_reg_all_tg_pt) {
-                               list_for_each_entry_safe(pr_reg_p, pr_reg_tmp,
-                                               &pr_tmpl->registration_list,
-                                               pr_reg_list) {
-
-                                       if (!pr_reg_p->pr_reg_all_tg_pt)
-                                               continue;
+               __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
 
-                                       if (pr_reg_p->pr_res_key != res_key)
-                                               continue;
-
-                                       if (pr_reg == pr_reg_p)
-                                               continue;
-
-                                       if (strcmp(pr_reg->pr_reg_nacl->initiatorname,
-                                                  pr_reg_p->pr_reg_nacl->initiatorname))
-                                               continue;
-
-                                       __core_scsi3_free_registration(dev,
-                                                       pr_reg_p, NULL, 0);
-                               }
-                       }
-                       /*
-                        * Release the calling I_T Nexus registration now..
-                        */
-                       __core_scsi3_free_registration(cmd->se_dev, pr_reg,
-                                                       NULL, 1);
-                       /*
-                        * From spc4r17, section 5.7.11.3 Unregistering
-                        *
-                        * If the persistent reservation is a registrants only
-                        * type, the device server shall establish a unit
-                        * attention condition for the initiator port associated
-                        * with every registered I_T nexus except for the I_T
-                        * nexus on which the PERSISTENT RESERVE OUT command was
-                        * received, with the additional sense code set to
-                        * RESERVATIONS RELEASED.
-                        */
-                       if (pr_holder &&
-                          ((type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
-                           (type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY))) {
-                               list_for_each_entry(pr_reg_p,
-                                               &pr_tmpl->registration_list,
-                                               pr_reg_list) {
-
-                                       core_scsi3_ua_allocate(
-                                               pr_reg_p->pr_reg_nacl,
-                                               pr_reg_p->pr_res_mapped_lun,
-                                               0x2A,
-                                               ASCQ_2AH_RESERVATIONS_RELEASED);
-                               }
+               /*
+                * From spc4r17, section 5.7.11.3 Unregistering
+                *
+                * If the persistent reservation is a registrants only
+                * type, the device server shall establish a unit
+                * attention condition for the initiator port associated
+                * with every registered I_T nexus except for the I_T
+                * nexus on which the PERSISTENT RESERVE OUT command was
+                * received, with the additional sense code set to
+                * RESERVATIONS RELEASED.
+                */
+               if (pr_holder &&
+                  (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+                   type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+                       list_for_each_entry(pr_reg_p,
+                                       &pr_tmpl->registration_list,
+                                       pr_reg_list) {
+
+                               core_scsi3_ua_allocate(
+                                       pr_reg_p->pr_reg_nacl,
+                                       pr_reg_p->pr_res_mapped_lun,
+                                       0x2A,
+                                       ASCQ_2AH_RESERVATIONS_RELEASED);
                        }
-                       spin_unlock(&pr_tmpl->registration_lock);
+               }
+               spin_unlock(&pr_tmpl->registration_lock);
 
-                       if (!aptpl) {
-                               pr_tmpl->pr_aptpl_active = 0;
-                               core_scsi3_update_and_write_aptpl(dev, NULL, 0);
-                               pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
-                                               " for UNREGISTER\n");
-                               return 0;
-                       }
+               if (!aptpl) {
+                       pr_tmpl->pr_aptpl_active = 0;
+                       core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+                       pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+                                       " for UNREGISTER\n");
+                       return 0;
+               }
 
-                       ret = core_scsi3_update_and_write_aptpl(dev,
-                                       &pr_aptpl_buf[0],
-                                       pr_tmpl->pr_aptpl_buf_len);
-                       if (!ret) {
-                               pr_tmpl->pr_aptpl_active = 1;
-                               pr_debug("SPC-3 PR: Set APTPL Bit Activated"
-                                               " for UNREGISTER\n");
-                       }
+               if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+                               pr_tmpl->pr_aptpl_buf_len)) {
+                       pr_tmpl->pr_aptpl_active = 1;
+                       pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+                                       " for UNREGISTER\n");
+               }
 
-                       kfree(pr_aptpl_buf);
-                       return ret;
-               } else {
-                       /*
-                        * Increment PRgeneration counter for struct se_device"
-                        * upon a successful REGISTER, see spc4r17 section 6.3.2
-                        * READ_KEYS service action.
-                        */
-                       pr_reg->pr_res_generation = core_scsi3_pr_generation(
-                                                       cmd->se_dev);
-                       pr_reg->pr_res_key = sa_res_key;
-                       pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
-                               " Key for %s to: 0x%016Lx PRgeneration:"
-                               " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
-                               (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
-                               pr_reg->pr_reg_nacl->initiatorname,
-                               pr_reg->pr_res_key, pr_reg->pr_res_generation);
-
-                       if (!aptpl) {
-                               pr_tmpl->pr_aptpl_active = 0;
-                               core_scsi3_update_and_write_aptpl(dev, NULL, 0);
-                               core_scsi3_put_pr_reg(pr_reg);
-                               pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
-                                               " for REGISTER\n");
-                               return 0;
-                       }
+               goto out_free_aptpl_buf;
+       }
 
-                       ret = core_scsi3_update_and_write_aptpl(dev,
-                                       &pr_aptpl_buf[0],
-                                       pr_tmpl->pr_aptpl_buf_len);
-                       if (!ret) {
-                               pr_tmpl->pr_aptpl_active = 1;
-                               pr_debug("SPC-3 PR: Set APTPL Bit Activated"
-                                               " for REGISTER\n");
-                       }
+       /*
+        * Increment PRgeneration counter for struct se_device
+        * upon a successful REGISTER, see spc4r17 section 6.3.2
+        * READ_KEYS service action.
+        */
+       pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev);
+       pr_reg->pr_res_key = sa_res_key;
+       pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+               " Key for %s to: 0x%016Lx PRgeneration:"
+               " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
+               (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
+               pr_reg->pr_reg_nacl->initiatorname,
+               pr_reg->pr_res_key, pr_reg->pr_res_generation);
 
-                       kfree(pr_aptpl_buf);
-                       core_scsi3_put_pr_reg(pr_reg);
-               }
+       if (!aptpl) {
+               pr_tmpl->pr_aptpl_active = 0;
+               core_scsi3_update_and_write_aptpl(dev, NULL, 0);
+               pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
+                               " for REGISTER\n");
+               ret = 0;
+               goto out_put_pr_reg;
        }
-       return 0;
+
+       if (!core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0],
+                                               pr_tmpl->pr_aptpl_buf_len)) {
+               pr_tmpl->pr_aptpl_active = 1;
+               pr_debug("SPC-3 PR: Set APTPL Bit Activated"
+                       " for REGISTER\n");
+       }
+
+out_free_aptpl_buf:
+       kfree(pr_aptpl_buf);
+       ret = 0;
+out_put_pr_reg:
+       core_scsi3_put_pr_reg(pr_reg);
+       return ret;
 }
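
/*
 * Userspace sketch of the unwind idiom the rewrite above adopts for
 * core_scsi3_emulate_pro_register(): guard clauses jump to labels that
 * release resources in reverse order of acquisition, so each resource is
 * put or freed exactly once. All names are illustrative, not the kernel
 * API.
 */
#include <stdio.h>
#include <stdlib.h>

struct reg { int refs; };

static struct reg *get_reg(void) { static struct reg r; r.refs++; return &r; }
static void put_reg(struct reg *r) { r->refs--; }

static int do_register(int aptpl, int fail_late)
{
	struct reg *r = get_reg();
	char *buf = NULL;
	int ret;

	if (aptpl) {
		buf = malloc(64);
		if (!buf) {
			ret = -1;		/* allocation failure */
			goto out_put_reg;
		}
	}
	if (fail_late) {
		ret = -1;			/* e.g. a conflict check */
		goto out_free_buf;
	}
	/* ... main work ... */
	ret = 0;
out_free_buf:
	free(buf);		/* free(NULL) is a harmless no-op */
out_put_reg:
	put_reg(r);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_register(1, 0), do_register(1, 1));
	return 0;
}
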
 
 unsigned char *core_scsi3_pr_dump_type(int type)
@@ -2424,26 +2341,23 @@ unsigned char *core_scsi3_pr_dump_type(int type)
        return "Unknown SPC-3 PR Type";
 }
 
-static int core_scsi3_pro_reserve(
-       struct se_cmd *cmd,
-       struct se_device *dev,
-       int type,
-       int scope,
-       u64 res_key)
+static sense_reason_t
+core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
 {
+       struct se_device *dev = cmd->se_dev;
        struct se_session *se_sess = cmd->se_sess;
        struct se_lun *se_lun = cmd->se_lun;
        struct t10_pr_registration *pr_reg, *pr_res_holder;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        char i_buf[PR_REG_ISID_ID_LEN];
-       int ret, prf_isid;
+       sense_reason_t ret;
+       int prf_isid;
 
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2453,8 +2367,7 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2469,9 +2382,8 @@ static int core_scsi3_pro_reserve(
                pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
+               goto out_put_pr_reg;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2485,9 +2397,8 @@ static int core_scsi3_pro_reserve(
         */
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_put_pr_reg;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2518,9 +2429,8 @@ static int core_scsi3_pro_reserve(
                                pr_res_holder->pr_reg_nacl->initiatorname);
 
                        spin_unlock(&dev->dev_reservation_lock);
-                       core_scsi3_put_pr_reg(pr_reg);
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EINVAL;
+                       ret = TCM_RESERVATION_CONFLICT;
+                       goto out_put_pr_reg;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2542,9 +2452,8 @@ static int core_scsi3_pro_reserve(
                                pr_res_holder->pr_reg_nacl->initiatorname);
 
                        spin_unlock(&dev->dev_reservation_lock);
-                       core_scsi3_put_pr_reg(pr_reg);
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EINVAL;
+                       ret = TCM_RESERVATION_CONFLICT;
+                       goto out_put_pr_reg;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2557,8 +2466,8 @@ static int core_scsi3_pro_reserve(
                 * shall complete the command with GOOD status.
                 */
                spin_unlock(&dev->dev_reservation_lock);
-               core_scsi3_put_pr_reg(pr_reg);
-               return 0;
+               ret = 0;
+               goto out_put_pr_reg;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2582,27 +2491,24 @@ static int core_scsi3_pro_reserve(
        spin_unlock(&dev->dev_reservation_lock);
 
        if (pr_tmpl->pr_aptpl_active) {
-               ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+               if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
                                &pr_reg->pr_aptpl_buf[0],
-                               pr_tmpl->pr_aptpl_buf_len);
-               if (!ret)
+                               pr_tmpl->pr_aptpl_buf_len)) {
                        pr_debug("SPC-3 PR: Updated APTPL metadata"
                                        " for RESERVE\n");
+               }
        }
 
+       ret = 0;
+out_put_pr_reg:
        core_scsi3_put_pr_reg(pr_reg);
-       return 0;
+       return ret;
 }
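
/*
 * Userspace sketch of the locking discipline in the RESERVE path above:
 * the reservation holder is inspected and updated under a lock, and
 * every exit drops the lock before a status is returned. A pthread
 * mutex stands in for the kernel spinlock; names and values are
 * illustrative.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static int holder;			/* 0 = unreserved, else holder id */

static int reserve(int who)
{
	int ret;

	pthread_mutex_lock(&res_lock);
	if (holder && holder != who)
		ret = -1;		/* RESERVATION CONFLICT */
	else if (holder == who)
		ret = 0;		/* already ours: GOOD */
	else {
		holder = who;		/* become the holder */
		ret = 0;
	}
	pthread_mutex_unlock(&res_lock);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", reserve(1), reserve(1), reserve(2));
	return 0;
}
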
 
-static int core_scsi3_emulate_pro_reserve(
-       struct se_cmd *cmd,
-       int type,
-       int scope,
-       u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_reserve(struct se_cmd *cmd, int type, int scope,
+               u64 res_key)
 {
-       struct se_device *dev = cmd->se_dev;
-       int ret = 0;
-
        switch (type) {
        case PR_TYPE_WRITE_EXCLUSIVE:
        case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -2610,16 +2516,12 @@ static int core_scsi3_emulate_pro_reserve(
        case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
        case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
        case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
-               ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
-               break;
+               return core_scsi3_pro_reserve(cmd, type, scope, res_key);
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
-
-       return ret;
 }
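
/*
 * Sketch of the dispatch simplification above: each switch case returns
 * its handler's status directly, so the local ret variable and the
 * break/return-at-end bookkeeping disappear. Enum and type values are
 * illustrative stand-ins.
 */
#include <stdio.h>

enum status { GOOD = 0, INVALID_CDB_FIELD };

static enum status do_reserve(int type) { (void)type; return GOOD; }

static enum status emulate_reserve(int type)
{
	switch (type) {
	case 1:			/* e.g. WRITE EXCLUSIVE */
	case 3:			/* e.g. EXCLUSIVE ACCESS */
		return do_reserve(type);
	default:
		fprintf(stderr, "unknown RESERVE type 0x%02x\n", type);
		return INVALID_CDB_FIELD;
	}
}

int main(void)
{
	printf("%d %d\n", emulate_reserve(1), emulate_reserve(9));
	return 0;
}
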
 
 /*
@@ -2657,23 +2559,21 @@ static void __core_scsi3_complete_pro_release(
        pr_reg->pr_res_holder = pr_reg->pr_res_type = pr_reg->pr_res_scope = 0;
 }
 
-static int core_scsi3_emulate_pro_release(
-       struct se_cmd *cmd,
-       int type,
-       int scope,
-       u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
+               u64 res_key)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_session *se_sess = cmd->se_sess;
        struct se_lun *se_lun = cmd->se_lun;
        struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
-       int ret, all_reg = 0;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
+       int all_reg = 0;
+       sense_reason_t ret = 0;
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2682,8 +2582,7 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2704,8 +2603,7 @@ static int core_scsi3_emulate_pro_release(
                 * No persistent reservation, return GOOD status.
                 */
                spin_unlock(&dev->dev_reservation_lock);
-               core_scsi3_put_pr_reg(pr_reg);
-               return 0;
+               goto out_put_pr_reg;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2718,9 +2616,9 @@ static int core_scsi3_emulate_pro_release(
                 * persistent reservation holder. return GOOD status.
                 */
                spin_unlock(&dev->dev_reservation_lock);
-               core_scsi3_put_pr_reg(pr_reg);
-               return 0;
+               goto out_put_pr_reg;
        }
+
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
         *
@@ -2740,9 +2638,8 @@ static int core_scsi3_emulate_pro_release(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
+               goto out_put_pr_reg;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2763,9 +2660,8 @@ static int core_scsi3_emulate_pro_release(
                        pr_res_holder->pr_reg_nacl->initiatorname);
 
                spin_unlock(&dev->dev_reservation_lock);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
+               goto out_put_pr_reg;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2818,25 +2714,23 @@ static int core_scsi3_emulate_pro_release(
 
 write_aptpl:
        if (pr_tmpl->pr_aptpl_active) {
-               ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
-                               &pr_reg->pr_aptpl_buf[0],
-                               pr_tmpl->pr_aptpl_buf_len);
-               if (!ret)
+               if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
+                       &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len)) {
                        pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+               }
        }
-
+out_put_pr_reg:
        core_scsi3_put_pr_reg(pr_reg);
-       return 0;
+       return ret;
 }
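
/*
 * Sketch of the RELEASE decision ladder above (spc4r17 5.7.11.2): no
 * reservation, or a caller that is not the holder, completes with GOOD;
 * a wrong reservation key conflicts; only a matching holder actually
 * releases. Names and values are illustrative.
 */
#include <stdio.h>

struct res { int held; int holder; unsigned long long key; };

static int release(struct res *r, int who, unsigned long long key)
{
	if (!r->held || r->holder != who)
		return 0;		/* GOOD: nothing for us to do */
	if (r->key != key)
		return -1;		/* RESERVATION CONFLICT */
	r->held = 0;			/* drop the reservation */
	return 0;
}

int main(void)
{
	struct res r = { 1, 1, 0xabcd };

	printf("%d %d %d\n", release(&r, 2, 0xabcd),
	       release(&r, 1, 0xffff), release(&r, 1, 0xabcd));
	return 0;
}
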
 
-static int core_scsi3_emulate_pro_clear(
-       struct se_cmd *cmd,
-       u64 res_key)
+static sense_reason_t
+core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_node_acl *pr_reg_nacl;
        struct se_session *se_sess = cmd->se_sess;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
        u32 pr_res_mapped_lun = 0;
        int calling_it_nexus = 0;
@@ -2848,8 +2742,7 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2868,8 +2761,7 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               return TCM_RESERVATION_CONFLICT;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2993,28 +2885,22 @@ static void core_scsi3_release_preempt_and_abort(
        }
 }
 
-static int core_scsi3_pro_preempt(
-       struct se_cmd *cmd,
-       int type,
-       int scope,
-       u64 res_key,
-       u64 sa_res_key,
-       int abort)
+static sense_reason_t
+core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
+               u64 sa_res_key, int abort)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_node_acl *pr_reg_nacl;
        struct se_session *se_sess = cmd->se_sess;
        LIST_HEAD(preempt_and_abort_list);
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        u32 pr_res_mapped_lun = 0;
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
-       int prh_type = 0, prh_scope = 0, ret;
+       int prh_type = 0, prh_scope = 0;
 
-       if (!se_sess) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
-       }
+       if (!se_sess)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
                                se_sess);
@@ -3022,19 +2908,16 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               return TCM_RESERVATION_CONFLICT;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               return TCM_RESERVATION_CONFLICT;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
 
        spin_lock(&dev->dev_reservation_lock);
@@ -3047,8 +2930,7 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3142,8 +3024,7 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EINVAL;
+                       return TCM_RESERVATION_CONFLICT;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3162,13 +3043,13 @@ static int core_scsi3_pro_preempt(
                spin_unlock(&dev->dev_reservation_lock);
 
                if (pr_tmpl->pr_aptpl_active) {
-                       ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+                       if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
                                        &pr_reg_n->pr_aptpl_buf[0],
-                                       pr_tmpl->pr_aptpl_buf_len);
-                       if (!ret)
+                                       pr_tmpl->pr_aptpl_buf_len)) {
                                pr_debug("SPC-3 PR: Updated APTPL"
                                        " metadata for  PREEMPT%s\n", (abort) ?
                                        "_AND_ABORT" : "");
+                       }
                }
 
                core_scsi3_put_pr_reg(pr_reg_n);
@@ -3298,12 +3179,12 @@ static int core_scsi3_pro_preempt(
        }
 
        if (pr_tmpl->pr_aptpl_active) {
-               ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+               if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
                                &pr_reg_n->pr_aptpl_buf[0],
-                               pr_tmpl->pr_aptpl_buf_len);
-               if (!ret)
+                               pr_tmpl->pr_aptpl_buf_len)) {
                        pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
-                               "%s\n", (abort) ? "_AND_ABORT" : "");
+                               "%s\n", abort ? "_AND_ABORT" : "");
+               }
        }
 
        core_scsi3_put_pr_reg(pr_reg_n);
@@ -3311,16 +3192,10 @@ static int core_scsi3_pro_preempt(
        return 0;
 }
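
/*
 * Sketch of the deletion-safe list walk the PREEMPT and UNREGISTER paths
 * above rely on: the link to the next node is preserved before the
 * current node may be freed. A pointer-to-pointer walk over a minimal
 * singly linked list stands in for the kernel's
 * list_for_each_entry_safe(); names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct regnode {
	unsigned long long key;
	struct regnode *next;
};

static void drop_matching(struct regnode **head, unsigned long long key)
{
	struct regnode **pp = head;

	while (*pp) {
		struct regnode *cur = *pp;

		if (cur->key == key) {
			*pp = cur->next;	/* unlink before freeing */
			free(cur);
			continue;		/* *pp already names the next node */
		}
		pp = &cur->next;
	}
}

int main(void)
{
	struct regnode *head = NULL;
	unsigned long long keys[] = { 1, 2, 2, 3 };

	for (int i = 3; i >= 0; i--) {
		struct regnode *n = malloc(sizeof(*n));
		n->key = keys[i];
		n->next = head;
		head = n;
	}
	drop_matching(&head, 2);
	for (struct regnode *n = head; n; n = n->next)
		printf("%llu\n", n->key);	/* prints 1 then 3 */
	return 0;
}
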
 
-static int core_scsi3_emulate_pro_preempt(
-       struct se_cmd *cmd,
-       int type,
-       int scope,
-       u64 res_key,
-       u64 sa_res_key,
-       int abort)
+static sense_reason_t
+core_scsi3_emulate_pro_preempt(struct se_cmd *cmd, int type, int scope,
+               u64 res_key, u64 sa_res_key, int abort)
 {
-       int ret = 0;
-
        switch (type) {
        case PR_TYPE_WRITE_EXCLUSIVE:
        case PR_TYPE_EXCLUSIVE_ACCESS:
@@ -3328,26 +3203,19 @@ static int core_scsi3_emulate_pro_preempt(
        case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
        case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
        case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
-               ret = core_scsi3_pro_preempt(cmd, type, scope,
-                               res_key, sa_res_key, abort);
-               break;
+               return core_scsi3_pro_preempt(cmd, type, scope, res_key,
+                                             sa_res_key, abort);
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
-
-       return ret;
 }
 
 
-static int core_scsi3_emulate_pro_register_and_move(
-       struct se_cmd *cmd,
-       u64 res_key,
-       u64 sa_res_key,
-       int aptpl,
-       int unreg)
+static sense_reason_t
+core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
+               u64 sa_res_key, int aptpl, int unreg)
 {
        struct se_session *se_sess = cmd->se_sess;
        struct se_device *dev = cmd->se_dev;
@@ -3358,20 +3226,21 @@ static int core_scsi3_emulate_pro_register_and_move(
        struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
        struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
        struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
        unsigned char *initiator_str;
        char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
        u32 tid_len, tmp_tid_len;
-       int new_reg = 0, type, scope, ret, matching_iname, prf_isid;
+       int new_reg = 0, type, scope, matching_iname, prf_isid;
+       sense_reason_t ret;
        unsigned short rtpi;
        unsigned char proto_ident;
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
+
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        se_tpg = se_sess->se_tpg;
@@ -3387,8 +3256,7 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        /*
         * The provided reservation key must match the existing reservation key
@@ -3398,9 +3266,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
+               goto out_put_pr_reg;
        }
        /*
         * The service action reservation key needs to be non-zero
@@ -3408,9 +3275,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!sa_res_key) {
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_put_pr_reg;
        }
 
        /*
@@ -3419,6 +3285,11 @@ static int core_scsi3_emulate_pro_register_and_move(
         * information.
         */
        buf = transport_kmap_data_sg(cmd);
+       if (!buf) {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto out_put_pr_reg;
+       }
+
        rtpi = (buf[18] & 0xff) << 8;
        rtpi |= buf[19] & 0xff;
        tid_len = (buf[20] & 0xff) << 24;
@@ -3432,9 +3303,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_put_pr_reg;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3452,15 +3322,13 @@ static int core_scsi3_emulate_pro_register_and_move(
                smp_mb__after_atomic_inc();
                spin_unlock(&dev->se_port_lock);
 
-               ret = core_scsi3_tpg_depend_item(dest_se_tpg);
-               if (ret != 0) {
+               if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
                        pr_err("core_scsi3_tpg_depend_item() failed"
                                " for dest_se_tpg\n");
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
-                       core_scsi3_put_pr_reg(pr_reg);
-                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       return -EINVAL;
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       goto out_put_pr_reg;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3472,12 +3340,15 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
-               core_scsi3_put_pr_reg(pr_reg);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
+               goto out_put_pr_reg;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf) {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto out_put_pr_reg;
+       }
        proto_ident = (buf[24] & 0x0f);
 
        pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3489,16 +3360,14 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " containg a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               ret = -EINVAL;
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3506,8 +3375,7 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
 
@@ -3536,8 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3545,8 +3412,7 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
 after_iport_check:
@@ -3566,19 +3432,17 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
-       ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
-       if (ret != 0) {
+
+       if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
                pr_err("core_scsi3_nodeacl_depend_item() for"
                        " dest_node_acl\n");
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
 
@@ -3594,19 +3458,16 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto out;
        }
 
-       ret = core_scsi3_lunacl_depend_item(dest_se_deve);
-       if (ret < 0) {
+       if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
                pr_err("core_scsi3_lunacl_depend_item() failed\n");
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               ret = -EINVAL;
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto out;
        }
 
@@ -3625,8 +3486,7 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               ret = -EINVAL;
+               ret = TCM_INVALID_CDB_FIELD;
                goto out;
        }
        /*
@@ -3639,8 +3499,7 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
                goto out;
        }
        /*
@@ -3658,8 +3517,7 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
+               ret = TCM_RESERVATION_CONFLICT;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3691,13 +3549,11 @@ after_iport_check:
        dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
                                        iport_ptr);
        if (!dest_pr_reg) {
-               ret = core_scsi3_alloc_registration(cmd->se_dev,
+               if (core_scsi3_alloc_registration(cmd->se_dev,
                                dest_node_acl, dest_se_deve, iport_ptr,
-                               sa_res_key, 0, aptpl, 2, 1);
-               if (ret != 0) {
+                               sa_res_key, 0, aptpl, 2, 1)) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
+                       ret = TCM_INVALID_PARAMETER_LIST;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3768,12 +3624,12 @@ after_iport_check:
                                " REGISTER_AND_MOVE\n");
        } else {
                pr_tmpl->pr_aptpl_active = 1;
-               ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
+               if (!core_scsi3_update_and_write_aptpl(cmd->se_dev,
                                &dest_pr_reg->pr_aptpl_buf[0],
-                               pr_tmpl->pr_aptpl_buf_len);
-               if (!ret)
+                               pr_tmpl->pr_aptpl_buf_len)) {
                        pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
                                        " REGISTER_AND_MOVE\n");
+               }
        }
 
        transport_kunmap_data_sg(cmd);
@@ -3788,6 +3644,8 @@ out:
        if (dest_node_acl)
                core_scsi3_nodeacl_undepend_item(dest_node_acl);
        core_scsi3_tpg_undepend_item(dest_se_tpg);
+
+out_put_pr_reg:
        core_scsi3_put_pr_reg(pr_reg);
        return ret;
 }
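
/*
 * Sketch of the byte-order handling in the REGISTER_AND_MOVE parameter
 * list above: rtpi is a big-endian 16-bit field at offset 18 and tid_len
 * a big-endian 32-bit field at offset 20, assembled byte by byte exactly
 * as the diff does. The offsets follow the code above; the helpers are
 * illustrative, not a kernel API.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t get_be16(const unsigned char *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t get_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	unsigned char buf[24] = { 0 };

	buf[18] = 0x00; buf[19] = 0x02;				/* rtpi = 2 */
	buf[20] = 0; buf[21] = 0; buf[22] = 0x01; buf[23] = 0;	/* tid_len = 256 */

	printf("rtpi=%u tid_len=%u\n", get_be16(&buf[18]), get_be32(&buf[20]));
	return 0;
}
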
@@ -3805,14 +3663,15 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
 /*
  * See spc4r17 section 6.14 Table 170
  */
-int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_out(struct se_cmd *cmd)
 {
        unsigned char *cdb = &cmd->t_task_cdb[0];
        unsigned char *buf;
        u64 res_key, sa_res_key;
        int sa, scope, type, aptpl;
        int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
-       int ret;
+       sense_reason_t ret;
 
        /*
         * Following spc2r20 5.5.1 Reservations overview:
@@ -3823,32 +3682,26 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
         * initiator or service action and shall terminate with a RESERVATION
         * CONFLICT status.
         */
-       if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+       if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               ret = -EINVAL;
-               goto out;
+               return TCM_RESERVATION_CONFLICT;
        }
 
        /*
         * FIXME: A NULL struct se_session pointer means this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               ret = -EINVAL;
-               goto out;
-       }
+       if (!cmd->se_sess)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
-               goto out;
+               return TCM_INVALID_PARAMETER_LIST;
        }
+
        /*
         * From the PERSISTENT_RESERVE_OUT command descriptor block (CDB)
         */
@@ -3857,6 +3710,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
        type = (cdb[2] & 0x0f);
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        /*
         * From PERSISTENT_RESERVE_OUT parameter list (payload)
         */
@@ -3880,11 +3736,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
        /*
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
-       if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
-               goto out;
-       }
+       if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER))
+               return TCM_INVALID_PARAMETER_LIST;
 
        /*
         * From spc4r17 section 6.14:
@@ -3899,10 +3752,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
-               goto out;
+               return TCM_INVALID_PARAMETER_LIST;
        }
+
        /*
         * (core_scsi3_emulate_pro_* function parameters
         * are defined by spc4r17 Table 174:
@@ -3941,12 +3793,9 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               ret = -EINVAL;
-               break;
+               return TCM_INVALID_CDB_FIELD;
        }
 
-out:
        if (!ret)
                target_complete_cmd(cmd, GOOD);
        return ret;
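
/*
 * Sketch of the return-channel change this series makes: handlers now
 * return a sense-reason value directly instead of -EINVAL plus an
 * out-of-band cmd->scsi_sense_reason, and the dispatcher's final
 * "if (!ret) complete(GOOD)" works because success is the zero value.
 * The enum below is an illustrative stand-in for the kernel's
 * sense_reason_t values.
 */
#include <stdio.h>

typedef enum {
	TCM_NO_SENSE = 0,		/* zero means GOOD */
	TCM_INVALID_CDB_FIELD,
	TCM_RESERVATION_CONFLICT,
} sense_reason_t;

static sense_reason_t handle_pr_out(int sa)
{
	return (sa == 0) ? TCM_NO_SENSE : TCM_INVALID_CDB_FIELD;
}

int main(void)
{
	sense_reason_t ret = handle_pr_out(0);

	if (!ret)
		printf("complete with GOOD\n");
	else
		printf("build sense from reason %d\n", ret);
	return 0;
}
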
@@ -3957,10 +3806,10 @@ out:
  *
  * See spc4r17 section 5.7.6.2 and section 6.13.2, Table 160
  */
-static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_keys(struct se_cmd *cmd)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+       struct se_device *dev = cmd->se_dev;
        struct t10_pr_registration *pr_reg;
        unsigned char *buf;
        u32 add_len = 0, off = 8;
@@ -3968,18 +3817,20 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        buf = transport_kmap_data_sg(cmd);
-       buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
-       buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
-       buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
-       buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-       spin_lock(&su_dev->t10_pr.registration_lock);
-       list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
+       buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+       buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+       buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+       buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+       spin_lock(&dev->t10_pr.registration_lock);
+       list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
                        pr_reg_list) {
                /*
                 * Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3999,7 +3850,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
 
                add_len += 8;
        }
-       spin_unlock(&su_dev->t10_pr.registration_lock);
+       spin_unlock(&dev->t10_pr.registration_lock);
 
        buf[4] = ((add_len >> 24) & 0xff);
        buf[5] = ((add_len >> 16) & 0xff);
@@ -4016,10 +3867,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
  *
  * See spc4r17 section 5.7.6.3 and section 6.13.3.2 Table 161 and 162
  */
-static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 {
-       struct se_device *se_dev = cmd->se_dev;
-       struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
+       struct se_device *dev = cmd->se_dev;
        struct t10_pr_registration *pr_reg;
        unsigned char *buf;
        u64 pr_res_key;
@@ -4028,18 +3879,20 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        buf = transport_kmap_data_sg(cmd);
-       buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
-       buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
-       buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
-       buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-       spin_lock(&se_dev->dev_reservation_lock);
-       pr_reg = se_dev->dev_pr_res_holder;
+       buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+       buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+       buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+       buf[3] = (dev->t10_pr.pr_generation & 0xff);
+
+       spin_lock(&dev->dev_reservation_lock);
+       pr_reg = dev->dev_pr_res_holder;
        if (pr_reg) {
                /*
                 * Set the hardcoded Additional Length
@@ -4090,7 +3943,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        }
 
 err:
-       spin_unlock(&se_dev->dev_reservation_lock);
+       spin_unlock(&dev->dev_reservation_lock);
        transport_kunmap_data_sg(cmd);
 
        return 0;
@@ -4101,21 +3954,23 @@ err:
  *
  * See spc4r17 section 6.13.4 Table 165
  */
-static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
        u16 add_len = 8; /* Hardcoded to 8. */
 
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        buf[0] = ((add_len >> 8) & 0xff);
        buf[1] = (add_len & 0xff);
@@ -4157,14 +4012,14 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
  *
  * See spc4r17 section 6.13.5 Table 168 and 169
  */
-static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
+static sense_reason_t
+core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 {
-       struct se_device *se_dev = cmd->se_dev;
+       struct se_device *dev = cmd->se_dev;
        struct se_node_acl *se_nacl;
-       struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
        struct se_portal_group *se_tpg;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-       struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+       struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
        u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
        u32 off = 8; /* off into first Full Status descriptor */
@@ -4173,16 +4028,17 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-       buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
-       buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
-       buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
-       buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
+       buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff);
+       buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff);
+       buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
+       buf[3] = (dev->t10_pr.pr_generation & 0xff);
 
        spin_lock(&pr_tmpl->registration_lock);
        list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4303,9 +4159,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        return 0;
 }
 
-int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
+sense_reason_t
+target_scsi3_emulate_pr_in(struct se_cmd *cmd)
 {
-       int ret;
+       sense_reason_t ret;
 
        /*
         * Following spc2r20 5.5.1 Reservations overview:
@@ -4316,12 +4173,11 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
         * initiator or service action and shall terminate with a RESERVATION
         * CONFLICT status.
         */
-       if (cmd->se_dev->dev_flags & DF_SPC2_RESERVATIONS) {
+       if (cmd->se_dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) {
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-               return -EINVAL;
+               return TCM_RESERVATION_CONFLICT;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4340,9 +4196,7 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               ret = -EINVAL;
-               break;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        if (!ret)
@@ -4350,56 +4204,25 @@ int target_scsi3_emulate_pr_in(struct se_cmd *cmd)
        return ret;
 }
 
-static int core_pt_reservation_check(struct se_cmd *cmd, u32 *pr_res_type)
-{
-       return 0;
-}
-
-static int core_pt_seq_non_holder(
-       struct se_cmd *cmd,
-       unsigned char *cdb,
-       u32 pr_reg_type)
+sense_reason_t
+target_check_reservation(struct se_cmd *cmd)
 {
-       return 0;
-}
+       struct se_device *dev = cmd->se_dev;
+       sense_reason_t ret;
 
-int core_setup_reservations(struct se_device *dev, int force_pt)
-{
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
-       struct t10_reservation *rest = &su_dev->t10_pr;
-       /*
-        * If this device is from Target_Core_Mod/pSCSI, use the reservations
-        * of the Underlying SCSI hardware.  In Linux/SCSI terms, this can
-        * cause a problem because libata and some SATA RAID HBAs appear
-        * under Linux/SCSI, but to emulate reservations themselves.
-        */
-       if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
-           !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
-               rest->res_type = SPC_PASSTHROUGH;
-               rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
-               rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
-               pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
-                       " emulation\n", dev->transport->name);
+       if (!cmd->se_sess)
+               return 0;
+       if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
+               return 0;
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
-       }
-       /*
-        * If SPC-3 or above is reported by real or emulated struct se_device,
-        * use emulated Persistent Reservations.
-        */
-       if (dev->transport->get_device_rev(dev) >= SCSI_3) {
-               rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
-               rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
-               rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
-               pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
-                       " emulation\n", dev->transport->name);
-       } else {
-               rest->res_type = SPC2_RESERVATIONS;
-               rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
-               rest->pr_ops.t10_seq_non_holder =
-                               &core_scsi2_reservation_seq_non_holder;
-               pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
-                       dev->transport->name);
-       }
 
-       return 0;
+       spin_lock(&dev->dev_reservation_lock);
+       if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
+               ret = target_scsi2_reservation_check(cmd);
+       else
+               ret = target_scsi3_pr_reservation_check(cmd);
+       spin_unlock(&dev->dev_reservation_lock);
+
+       return ret;
 }
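
A hypothetical call-site sketch for the new single entry point; only target_check_reservation() itself comes from this patch, the wrapper is invented for illustration:

        static sense_reason_t example_dispatch(struct se_cmd *cmd)
        {
                sense_reason_t ret;

                /* One direct call replaces the per-device pr_ops indirection. */
                ret = target_check_reservation(cmd);
                if (ret)
                        return ret;     /* e.g. TCM_RESERVATION_CONFLICT */

                return cmd->execute_cmd(cmd);
        }
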
index af6c460..b4a0042 100644 (file)
@@ -47,8 +47,8 @@ extern struct kmem_cache *t10_pr_reg_cache;
 
 extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
                        char *, u32);
-extern int target_scsi2_reservation_release(struct se_cmd *);
-extern int target_scsi2_reservation_reserve(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_release(struct se_cmd *);
+extern sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *);
 extern int core_scsi3_alloc_aptpl_registration(
                        struct t10_reservation *, u64,
                        unsigned char *, unsigned char *, u32,
@@ -61,8 +61,8 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
 
-extern int target_scsi3_emulate_pr_in(struct se_cmd *);
-extern int target_scsi3_emulate_pr_out(struct se_cmd *);
-extern int core_setup_reservations(struct se_device *, int);
+extern sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *);
+extern sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *);
+extern sense_reason_t target_check_reservation(struct se_cmd *);
 
 #endif /* TARGET_CORE_PR_H */
index 617c086..2bcfd79 100644 (file)
@@ -3,10 +3,7 @@
  *
  * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 
 #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
 
+static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
+{
+       return container_of(dev, struct pscsi_dev_virt, dev);
+}
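
The embedding idiom above, shown standalone with hypothetical names: struct se_device now lives inside the backend's private structure, so the backend is recovered with container_of() rather than through a dev_ptr back-pointer.

        struct example_dev {
                struct se_device dev;   /* embedded, not pointed-to */
                int private_state;
        };

        static inline struct example_dev *EXAMPLE_DEV(struct se_device *dev)
        {
                /* Recover the containing backend struct from the core's pointer. */
                return container_of(dev, struct example_dev, dev);
        }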
+
 static struct se_subsystem_api pscsi_template;
 
-static int pscsi_execute_cmd(struct se_cmd *cmd);
+static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
 static void pscsi_req_done(struct request *, int);
 
 /*     pscsi_attach_hba():
@@ -219,7 +221,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
 
        snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
 
-       wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
+       wwn->t10_dev->dev_flags |= DF_FIRMWARE_VPD_UNIT_SERIAL;
 
        kfree(buf);
        return 0;
@@ -299,23 +301,13 @@ out:
        kfree(buf);
 }
 
-/*     pscsi_add_device_to_list():
- *
- *
- */
-static struct se_device *pscsi_add_device_to_list(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       struct pscsi_dev_virt *pdv,
-       struct scsi_device *sd,
-       int dev_flags)
+static int pscsi_add_device_to_list(struct se_device *dev,
+               struct scsi_device *sd)
 {
-       struct se_device *dev;
-       struct se_dev_limits dev_limits;
-       struct request_queue *q;
-       struct queue_limits *limits;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+       struct request_queue *q = sd->request_queue;
 
-       memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+       pdv->pdv_sd = sd;
 
        if (!sd->queue_depth) {
                sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
@@ -324,54 +316,27 @@ static struct se_device *pscsi_add_device_to_list(
                        " queue_depth to %d\n", sd->channel, sd->id,
                                sd->lun, sd->queue_depth);
        }
-       /*
-        * Setup the local scope queue_limits from struct request_queue->limits
-        * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-        */
-       q = sd->request_queue;
-       limits = &dev_limits.limits;
-       limits->logical_block_size = sd->sector_size;
-       limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
-       limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
-       dev_limits.hw_queue_depth = sd->queue_depth;
-       dev_limits.queue_depth = sd->queue_depth;
-       /*
-        * Setup our standard INQUIRY info into se_dev->t10_wwn
-        */
-       pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
+
+       dev->dev_attrib.hw_block_size = sd->sector_size;
+       dev->dev_attrib.hw_max_sectors =
+               min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+       dev->dev_attrib.hw_queue_depth = sd->queue_depth;
 
        /*
-        * Set the pointer pdv->pdv_sd to from passed struct scsi_device,
-        * which has already been referenced with Linux SCSI code with
-        * scsi_device_get() in this file's pscsi_create_virtdevice().
-        *
-        * The passthrough operations called by the transport_add_device_*
-        * function below will require this pointer to be set for
-        * passthrough ops.
-        *
-        * For the shutdown case in pscsi_free_device(), this struct
-        * scsi_device  reference is released with Linux SCSI code
-        * scsi_device_put() and the pdv->pdv_sd cleared.
+        * Setup our standard INQUIRY info into se_dev->t10_wwn
         */
-       pdv->pdv_sd = sd;
-       dev = transport_add_device_to_core_hba(hba, &pscsi_template,
-                               se_dev, dev_flags, pdv,
-                               &dev_limits, NULL, NULL);
-       if (!dev) {
-               pdv->pdv_sd = NULL;
-               return NULL;
-       }
+       pscsi_set_inquiry_info(sd, &dev->t10_wwn);
 
        /*
         * Locate VPD WWN Information used for various purposes within
         * the Storage Engine.
         */
-       if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
+       if (!pscsi_get_inquiry_vpd_serial(sd, &dev->t10_wwn)) {
                /*
                 * If VPD Unit Serial returned GOOD status, try
                 * VPD Device Identification page (0x83).
                 */
-               pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
+               pscsi_get_inquiry_vpd_device_ident(sd, &dev->t10_wwn);
        }
 
        /*
@@ -379,10 +344,11 @@ static struct se_device *pscsi_add_device_to_list(
         */
        if (sd->type == TYPE_TAPE)
                pscsi_tape_read_blocksize(dev, sd);
-       return dev;
+       return 0;
 }
 
-static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *pscsi_alloc_device(struct se_hba *hba,
+               const char *name)
 {
        struct pscsi_dev_virt *pdv;
 
@@ -391,139 +357,125 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
                pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
                return NULL;
        }
-       pdv->pdv_se_hba = hba;
 
        pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
-       return pdv;
+       return &pdv->dev;
 }
 
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static struct se_device *pscsi_create_type_disk(
-       struct scsi_device *sd,
-       struct pscsi_dev_virt *pdv,
-       struct se_subsystem_dev *se_dev,
-       struct se_hba *hba)
+static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
        __releases(sh->host_lock)
 {
-       struct se_device *dev;
-       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
        struct Scsi_Host *sh = sd->host;
        struct block_device *bd;
-       u32 dev_flags = 0;
+       int ret;
 
        if (scsi_device_get(sd)) {
                pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
                        sh->host_no, sd->channel, sd->id, sd->lun);
                spin_unlock_irq(sh->host_lock);
-               return NULL;
+               return -EIO;
        }
        spin_unlock_irq(sh->host_lock);
        /*
         * Claim exclusive struct block_device access to struct scsi_device
         * for TYPE_DISK using supplied udev_path
         */
-       bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
+       bd = blkdev_get_by_path(dev->udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
        if (IS_ERR(bd)) {
                pr_err("pSCSI: blkdev_get_by_path() failed\n");
                scsi_device_put(sd);
-               return NULL;
+               return PTR_ERR(bd);
        }
        pdv->pdv_bd = bd;
 
-       dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-       if (!dev) {
+       ret = pscsi_add_device_to_list(dev, sd);
+       if (ret) {
                blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
                scsi_device_put(sd);
-               return NULL;
+               return ret;
        }
+
        pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
                phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
-
-       return dev;
+       return 0;
 }
 
 /*
  * Called with struct Scsi_Host->host_lock called.
  */
-static struct se_device *pscsi_create_type_rom(
-       struct scsi_device *sd,
-       struct pscsi_dev_virt *pdv,
-       struct se_subsystem_dev *se_dev,
-       struct se_hba *hba)
+static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
        __releases(sh->host_lock)
 {
-       struct se_device *dev;
-       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
        struct Scsi_Host *sh = sd->host;
-       u32 dev_flags = 0;
+       int ret;
 
        if (scsi_device_get(sd)) {
                pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
                        sh->host_no, sd->channel, sd->id, sd->lun);
                spin_unlock_irq(sh->host_lock);
-               return NULL;
+               return -EIO;
        }
        spin_unlock_irq(sh->host_lock);
 
-       dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-       if (!dev) {
+       ret = pscsi_add_device_to_list(dev, sd);
+       if (ret) {
                scsi_device_put(sd);
-               return NULL;
+               return ret;
        }
        pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
                phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
                sd->channel, sd->id, sd->lun);
 
-       return dev;
+       return 0;
 }
 
 /*
- *Called with struct Scsi_Host->host_lock called.
+ * Called with struct Scsi_Host->host_lock called.
  */
-static struct se_device *pscsi_create_type_other(
-       struct scsi_device *sd,
-       struct pscsi_dev_virt *pdv,
-       struct se_subsystem_dev *se_dev,
-       struct se_hba *hba)
+static int pscsi_create_type_other(struct se_device *dev,
+               struct scsi_device *sd)
        __releases(sh->host_lock)
 {
-       struct se_device *dev;
-       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
        struct Scsi_Host *sh = sd->host;
-       u32 dev_flags = 0;
+       int ret;
 
        spin_unlock_irq(sh->host_lock);
-       dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-       if (!dev)
-               return NULL;
+       ret = pscsi_add_device_to_list(dev, sd);
+       if (ret)
+               return ret;
 
        pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
                phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
                sd->channel, sd->id, sd->lun);
-
-       return dev;
+       return 0;
 }
 
-static struct se_device *pscsi_create_virtdevice(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       void *p)
+static int pscsi_configure_device(struct se_device *dev)
 {
-       struct pscsi_dev_virt *pdv = p;
-       struct se_device *dev;
+       struct se_hba *hba = dev->se_hba;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
        struct scsi_device *sd;
-       struct pscsi_hba_virt *phv = hba->hba_ptr;
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
        struct Scsi_Host *sh = phv->phv_lld_host;
        int legacy_mode_enable = 0;
+       int ret;
 
-       if (!pdv) {
-               pr_err("Unable to locate struct pscsi_dev_virt"
-                               " parameter\n");
-               return ERR_PTR(-EINVAL);
+       if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
+           !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
+           !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
+               pr_err("Missing scsi_channel_id=, scsi_target_id= and"
+                       " scsi_lun_id= parameters\n");
+               return -EINVAL;
        }
+
        /*
         * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
         * struct Scsi_Host we will need to bring the TCM/pSCSI object online
@@ -532,16 +484,16 @@ static struct se_device *pscsi_create_virtdevice(
                if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
                        pr_err("pSCSI: Unable to locate struct"
                                " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
-                       return ERR_PTR(-ENODEV);
+                       return -ENODEV;
                }
                /*
                 * For the newer PHV_VIRTUAL_HOST_ID struct scsi_device
                 * reference, we enforce that udev_path has been set
                 */
-               if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
+               if (!(dev->dev_flags & DF_USING_UDEV_PATH)) {
                        pr_err("pSCSI: udev_path attribute has not"
                                " been set before ENABLE=1\n");
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
                }
                /*
                 * If no scsi_host_id= was passed for PHV_VIRTUAL_HOST_ID,
@@ -549,17 +501,14 @@ static struct se_device *pscsi_create_virtdevice(
                 * and enable for PHV_LLD_SCSI_HOST_NO mode.
                 */
                if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
-                       spin_lock(&hba->device_lock);
-                       if (!list_empty(&hba->hba_dev_list)) {
+                       if (hba->dev_count) {
                                pr_err("pSCSI: Unable to set hba_mode"
                                        " with active devices\n");
-                               spin_unlock(&hba->device_lock);
-                               return ERR_PTR(-EEXIST);
+                               return -EEXIST;
                        }
-                       spin_unlock(&hba->device_lock);
 
                        if (pscsi_pmode_enable_hba(hba, 1) != 1)
-                               return ERR_PTR(-ENODEV);
+                               return -ENODEV;
 
                        legacy_mode_enable = 1;
                        hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
@@ -569,14 +518,14 @@ static struct se_device *pscsi_create_virtdevice(
                        if (IS_ERR(sh)) {
                                pr_err("pSCSI: Unable to locate"
                                        " pdv_host_id: %d\n", pdv->pdv_host_id);
-                               return ERR_CAST(sh);
+                               return PTR_ERR(sh);
                        }
                }
        } else {
                if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
                        pr_err("pSCSI: PHV_VIRTUAL_HOST_ID set while"
                                " struct Scsi_Host exists\n");
-                       return ERR_PTR(-EEXIST);
+                       return -EEXIST;
                }
        }
 
@@ -593,17 +542,17 @@ static struct se_device *pscsi_create_virtdevice(
                 */
                switch (sd->type) {
                case TYPE_DISK:
-                       dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
+                       ret = pscsi_create_type_disk(dev, sd);
                        break;
                case TYPE_ROM:
-                       dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
+                       ret = pscsi_create_type_rom(dev, sd);
                        break;
                default:
-                       dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
+                       ret = pscsi_create_type_other(dev, sd);
                        break;
                }
 
-               if (!dev) {
+               if (ret) {
                        if (phv->phv_mode == PHV_VIRTUAL_HOST_ID)
                                scsi_host_put(sh);
                        else if (legacy_mode_enable) {
@@ -611,9 +560,9 @@ static struct se_device *pscsi_create_virtdevice(
                                hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
                        }
                        pdv->pdv_sd = NULL;
-                       return ERR_PTR(-ENODEV);
+                       return ret;
                }
-               return dev;
+               return 0;
        }
        spin_unlock_irq(sh->host_lock);
 
@@ -627,17 +576,13 @@ static struct se_device *pscsi_create_virtdevice(
                hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
        }
 
-       return ERR_PTR(-ENODEV);
+       return -ENODEV;
 }
 
-/*     pscsi_free_device(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static void pscsi_free_device(void *p)
+static void pscsi_free_device(struct se_device *dev)
 {
-       struct pscsi_dev_virt *pdv = p;
-       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
        struct scsi_device *sd = pdv->pdv_sd;
 
        if (sd) {
@@ -670,7 +615,7 @@ static void pscsi_free_device(void *p)
 static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
                                     unsigned char *sense_buffer)
 {
-       struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
        struct scsi_device *sd = pdv->pdv_sd;
        int result;
        struct pscsi_plugin_task *pt = cmd->priv;
@@ -694,7 +639,11 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
        if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
             (status_byte(result) << 1) == SAM_STAT_GOOD) {
                if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
-                       unsigned char *buf = transport_kmap_data_sg(cmd);
+                       unsigned char *buf;
+
+                       buf = transport_kmap_data_sg(cmd);
+                       if (!buf)
+                               ; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
 
                        if (cdb[0] == MODE_SENSE_10) {
                                if (!(buf[3] & 0x80))
@@ -770,13 +719,11 @@ static match_table_t tokens = {
        {Opt_err, NULL}
 };
 
-static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       const char *page,
-       ssize_t count)
+static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
+               const char *page, ssize_t count)
 {
-       struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-       struct pscsi_hba_virt *phv = hba->hba_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;
@@ -841,29 +788,10 @@ out:
        return (!ret) ? count : ret;
 }
 
-static ssize_t pscsi_check_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev)
+static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-       struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
-
-       if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
-           !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
-           !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
-               pr_err("Missing scsi_channel_id=, scsi_target_id= and"
-                       " scsi_lun_id= parameters\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
-                                             struct se_subsystem_dev *se_dev,
-                                             char *b)
-{
-       struct pscsi_hba_virt *phv = hba->hba_ptr;
-        struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
+       struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
        struct scsi_device *sd = pdv->pdv_sd;
        unsigned char host_id[16];
        ssize_t bl;
@@ -929,11 +857,11 @@ static inline struct bio *pscsi_get_bio(int sg_num)
        return bio;
 }
 
-static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
-               u32 sgl_nents, enum dma_data_direction data_direction,
-               struct bio **hbio)
+static sense_reason_t
+pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+               enum dma_data_direction data_direction, struct bio **hbio)
 {
-       struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
        struct bio *bio = NULL, *tbio = NULL;
        struct page *page;
        struct scatterlist *sg;
@@ -1019,7 +947,7 @@ static int pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl,
                }
        }
 
-       return sgl_nents;
+       return 0;
 fail:
        while (*hbio) {
                bio = *hbio;
@@ -1027,8 +955,7 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       return -ENOMEM;
+       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 /*
@@ -1055,17 +982,13 @@ static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
        }
 }
 
-static int pscsi_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_parse_cdb(struct se_cmd *cmd)
 {
        unsigned char *cdb = cmd->t_task_cdb;
-       unsigned int dummy_size;
-       int ret;
 
-       if (cmd->se_cmd_flags & SCF_BIDI) {
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               return -EINVAL;
-       }
+       if (cmd->se_cmd_flags & SCF_BIDI)
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
 
        pscsi_clear_cdb_lun(cdb);
 
@@ -1076,10 +999,8 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
         */
        switch (cdb[0]) {
        case REPORT_LUNS:
-               ret = spc_parse_cdb(cmd, &dummy_size);
-               if (ret)
-                       return ret;
-               break;
+               cmd->execute_cmd = spc_emulate_report_luns;
+               return 0;
        case READ_6:
        case READ_10:
        case READ_12:
@@ -1093,22 +1014,21 @@ static int pscsi_parse_cdb(struct se_cmd *cmd)
                /* FALLTHROUGH*/
        default:
                cmd->execute_cmd = pscsi_execute_cmd;
-               break;
+               return 0;
        }
-
-       return 0;
 }
 
-static int pscsi_execute_cmd(struct se_cmd *cmd)
+static sense_reason_t
+pscsi_execute_cmd(struct se_cmd *cmd)
 {
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
-       struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
        struct pscsi_plugin_task *pt;
        struct request *req;
        struct bio *hbio;
-       int ret;
+       sense_reason_t ret;
 
        /*
         * Dynamically alloc cdb space, since it may be larger than
@@ -1116,8 +1036,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
         */
        pt = kzalloc(sizeof(*pt) + scsi_command_size(cmd->t_task_cdb), GFP_KERNEL);
        if (!pt) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -ENOMEM;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        cmd->priv = pt;
 
@@ -1131,24 +1050,21 @@ static int pscsi_execute_cmd(struct se_cmd *cmd)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? PTR_ERR(req) : -ENOMEM);
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto fail;
                }
        } else {
                BUG_ON(!cmd->data_length);
 
                ret = pscsi_map_sg(cmd, sgl, sgl_nents, data_direction, &hbio);
-               if (ret < 0) {
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               if (ret)
                        goto fail;
-               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
                if (IS_ERR(req)) {
                        pr_err("pSCSI: blk_make_request() failed\n");
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto fail_free_bio;
                }
        }
@@ -1179,22 +1095,10 @@ fail_free_bio:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
        kfree(pt);
-       return -ENOMEM;
-}
-
-/*     pscsi_get_device_rev():
- *
- *
- */
-static u32 pscsi_get_device_rev(struct se_device *dev)
-{
-       struct pscsi_dev_virt *pdv = dev->dev_ptr;
-       struct scsi_device *sd = pdv->pdv_sd;
-
-       return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
+       return ret;
 }
 
 /*     pscsi_get_device_type():
@@ -1203,7 +1107,7 @@ static u32 pscsi_get_device_rev(struct se_device *dev)
  */
 static u32 pscsi_get_device_type(struct se_device *dev)
 {
-       struct pscsi_dev_virt *pdv = dev->dev_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
        struct scsi_device *sd = pdv->pdv_sd;
 
        return sd->type;
@@ -1211,7 +1115,7 @@ static u32 pscsi_get_device_type(struct se_device *dev)
 
 static sector_t pscsi_get_blocks(struct se_device *dev)
 {
-       struct pscsi_dev_virt *pdv = dev->dev_ptr;
+       struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 
        if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
                return pdv->pdv_bd->bd_part->nr_sects;
@@ -1243,7 +1147,6 @@ static void pscsi_req_done(struct request *req, int uptodate)
                pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
                        " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0],
                        pt->pscsi_result);
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                break;
        }
@@ -1259,15 +1162,13 @@ static struct se_subsystem_api pscsi_template = {
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
-       .allocate_virtdevice    = pscsi_allocate_virtdevice,
-       .create_virtdevice      = pscsi_create_virtdevice,
+       .alloc_device           = pscsi_alloc_device,
+       .configure_device       = pscsi_configure_device,
        .free_device            = pscsi_free_device,
        .transport_complete     = pscsi_transport_complete,
        .parse_cdb              = pscsi_parse_cdb,
-       .check_configfs_dev_params = pscsi_check_configfs_dev_params,
        .set_configfs_dev_params = pscsi_set_configfs_dev_params,
        .show_configfs_dev_params = pscsi_show_configfs_dev_params,
-       .get_device_rev         = pscsi_get_device_rev,
        .get_device_type        = pscsi_get_device_type,
        .get_blocks             = pscsi_get_blocks,
 };
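
Putting the reworked se_subsystem_api contract together, a minimal backend might look like the sketch below (all names hypothetical, struct example_dev as in the earlier sketch, attribute values are placeholders):

        static struct se_device *example_alloc_device(struct se_hba *hba,
                        const char *name)
        {
                struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);

                /* Hand the embedded se_device back to the core. */
                return edev ? &edev->dev : NULL;
        }

        static int example_configure_device(struct se_device *dev)
        {
                /* Limits are set directly, replacing struct se_dev_limits. */
                dev->dev_attrib.hw_block_size = 512;
                dev->dev_attrib.hw_max_sectors = 1024;
                dev->dev_attrib.hw_queue_depth = 32;
                return 0;
        }

        static void example_free_device(struct se_device *dev)
        {
                kfree(EXAMPLE_DEV(dev));
        }
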
index bc1e5e1..1bd757d 100644 (file)
@@ -37,6 +37,7 @@ struct pscsi_plugin_task {
 #define PDF_HAS_VIRT_HOST_ID   0x20
 
 struct pscsi_dev_virt {
+       struct se_device dev;
        int     pdv_flags;
        int     pdv_host_id;
        int     pdv_channel_id;
@@ -44,7 +45,6 @@ struct pscsi_dev_virt {
        int     pdv_lun_id;
        struct block_device *pdv_bd;
        struct scsi_device *pdv_sd;
-       struct se_hba *pdv_se_hba;
 } ____cacheline_aligned;
 
 typedef enum phv_modes {
index d00bbe3..0457de3 100644 (file)
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine <-> Ramdisk transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 
 #include "target_core_rd.h"
 
-static struct se_subsystem_api rd_mcp_template;
+static inline struct rd_dev *RD_DEV(struct se_device *dev)
+{
+       return container_of(dev, struct rd_dev, dev);
+}
 
 /*     rd_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -196,7 +196,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
        return 0;
 }
 
-static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
 {
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;
@@ -209,39 +209,27 @@ static void *rd_allocate_virtdevice(struct se_hba *hba, const char *name)
 
        rd_dev->rd_host = rd_host;
 
-       return rd_dev;
+       return &rd_dev->dev;
 }
 
-static struct se_device *rd_create_virtdevice(struct se_hba *hba,
-               struct se_subsystem_dev *se_dev, void *p)
+static int rd_configure_device(struct se_device *dev)
 {
-       struct se_device *dev;
-       struct se_dev_limits dev_limits;
-       struct rd_dev *rd_dev = p;
-       struct rd_host *rd_host = hba->hba_ptr;
-       int dev_flags = 0, ret;
-       char prod[16], rev[4];
+       struct rd_dev *rd_dev = RD_DEV(dev);
+       struct rd_host *rd_host = dev->se_hba->hba_ptr;
+       int ret;
 
-       memset(&dev_limits, 0, sizeof(struct se_dev_limits));
+       if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
+               pr_debug("Missing rd_pages= parameter\n");
+               return -EINVAL;
+       }
 
        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;
 
-       snprintf(prod, 16, "RAMDISK-MCP");
-       snprintf(rev, 4, "%s", RD_MCP_VERSION);
-
-       dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
-       dev_limits.limits.max_hw_sectors = UINT_MAX;
-       dev_limits.limits.max_sectors = UINT_MAX;
-       dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
-       dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
-
-       dev = transport_add_device_to_core_hba(hba,
-                       &rd_mcp_template, se_dev, dev_flags, rd_dev,
-                       &dev_limits, prod, rev);
-       if (!dev)
-               goto fail;
+       dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
+       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
 
        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
 
@@ -251,16 +239,16 @@ static struct se_device *rd_create_virtdevice(struct se_hba *hba,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
 
-       return dev;
+       return 0;
 
 fail:
        rd_release_device_space(rd_dev);
-       return ERR_PTR(ret);
+       return ret;
 }
 
-static void rd_free_device(void *p)
+static void rd_free_device(struct se_device *dev)
 {
-       struct rd_dev *rd_dev = p;
+       struct rd_dev *rd_dev = RD_DEV(dev);
 
        rd_release_device_space(rd_dev);
        kfree(rd_dev);
@@ -284,13 +272,14 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-static int rd_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+rd_execute_rw(struct se_cmd *cmd)
 {
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *se_dev = cmd->se_dev;
-       struct rd_dev *dev = se_dev->dev_ptr;
+       struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
@@ -300,14 +289,14 @@ static int rd_execute_rw(struct se_cmd *cmd)
        u32 src_len;
        u64 tmp;
 
-       tmp = cmd->t_task_lba * se_dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;
 
        table = rd_get_sg_table(dev, rd_page);
        if (!table)
-               return -EINVAL;
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        rd_sg = &table->sg_table[rd_page - table->page_start_offset];
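
A worked example of the page arithmetic above, assuming 4 KiB pages and a 512-byte block size (values illustrative):

        u64 tmp = 9 * 512;                      /* t_task_lba * block_size = 4608 */
        u32 rd_offset = do_div(tmp, 4096);      /* 512: byte offset within the page */
        u32 rd_page = tmp;                      /* 1: page index into the sg tables */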
 
@@ -357,7 +346,7 @@ static int rd_execute_rw(struct se_cmd *cmd)
                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
-                       return -EINVAL;
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
 
                /* since we increment, the first sg entry is correct */
@@ -378,13 +367,10 @@ static match_table_t tokens = {
        {Opt_err, NULL}
 };
 
-static ssize_t rd_set_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       const char *page,
-       ssize_t count)
+static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
+               const char *page, ssize_t count)
 {
-       struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+       struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;
@@ -417,24 +403,10 @@ static ssize_t rd_set_configfs_dev_params(
        return (!ret) ? count : ret;
 }
 
-static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
+static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-       struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
+       struct rd_dev *rd_dev = RD_DEV(dev);
 
-       if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
-               pr_debug("Missing rd_pages= parameter\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t rd_show_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       char *b)
-{
-       struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
@@ -443,48 +415,40 @@ static ssize_t rd_show_configfs_dev_params(
        return bl;
 }
 
-static u32 rd_get_device_rev(struct se_device *dev)
-{
-       return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 rd_get_device_type(struct se_device *dev)
-{
-       return TYPE_DISK;
-}
-
 static sector_t rd_get_blocks(struct se_device *dev)
 {
-       struct rd_dev *rd_dev = dev->dev_ptr;
+       struct rd_dev *rd_dev = RD_DEV(dev);
+
        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
-                       dev->se_sub_dev->se_dev_attrib.block_size) - 1;
+                       dev->dev_attrib.block_size) - 1;
 
        return blocks_long;
 }
 
-static struct spc_ops rd_spc_ops = {
+static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
 };
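
As an illustration, a richer backend would simply fill in more sbc_ops slots (names hypothetical); sbc_parse_cdb() rejects any opcode whose slot is left NULL with TCM_UNSUPPORTED_SCSI_OPCODE:

        static sense_reason_t example_execute_rw(struct se_cmd *cmd);
        static sense_reason_t example_execute_sync_cache(struct se_cmd *cmd);
        static sense_reason_t example_execute_unmap(struct se_cmd *cmd);

        static struct sbc_ops example_sbc_ops = {
                .execute_rw             = example_execute_rw,
                .execute_sync_cache     = example_execute_sync_cache,
                .execute_unmap          = example_execute_unmap,
        };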
 
-static int rd_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+rd_parse_cdb(struct se_cmd *cmd)
 {
-       return sbc_parse_cdb(cmd, &rd_spc_ops);
+       return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 
 static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
+       .inquiry_prod           = "RAMDISK-MCP",
+       .inquiry_rev            = RD_MCP_VERSION,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
-       .allocate_virtdevice    = rd_allocate_virtdevice,
-       .create_virtdevice      = rd_create_virtdevice,
+       .alloc_device           = rd_alloc_device,
+       .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
-       .check_configfs_dev_params = rd_check_configfs_dev_params,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
-       .get_device_rev         = rd_get_device_rev,
-       .get_device_type        = rd_get_device_type,
+       .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
 };
 
index 2145812..933b38b 100644 (file)
@@ -24,6 +24,7 @@ struct rd_dev_sg_table {
 #define RDF_HAS_PAGE_COUNT     0x01
 
 struct rd_dev {
+       struct se_device dev;
        u32             rd_flags;
        /* Unique Ramdisk Device ID in Ramdisk HBA */
        u32             rd_dev_id;
index a6e27d9..26a6d18 100644 (file)
@@ -1,10 +1,7 @@
 /*
  * SCSI Block Commands (SBC) parsing and emulation.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -37,7 +34,8 @@
 #include "target_core_ua.h"
 
 
-static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        unsigned long long blocks_long = dev->transport->get_blocks(dev);
@@ -54,22 +52,24 @@ static int sbc_emulate_readcapacity(struct se_cmd *cmd)
        buf[1] = (blocks >> 16) & 0xff;
        buf[2] = (blocks >> 8) & 0xff;
        buf[3] = blocks & 0xff;
-       buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-       buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-       buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-       buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+       buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
+       buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
+       buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
+       buf[7] = dev->dev_attrib.block_size & 0xff;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (rbuf) {
-               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-               transport_kunmap_data_sg(cmd);
-       }
+       if (!rbuf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+       transport_kunmap_data_sg(cmd);
 
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
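
A worked example of the encoding above (illustrative values, not from the patch): a device whose last LBA is 0x003fffff with 512-byte blocks yields the 8-byte READ CAPACITY (10) payload 00 3f ff ff 00 00 02 00.

        u32 blocks = 0x003fffff;                /* returned logical block address */
        u32 block_size = 512;                   /* block length in bytes */
        unsigned char buf[8];

        buf[0] = (blocks >> 24) & 0xff;
        buf[1] = (blocks >> 16) & 0xff;
        buf[2] = (blocks >> 8) & 0xff;
        buf[3] = blocks & 0xff;
        buf[4] = (block_size >> 24) & 0xff;
        buf[5] = (block_size >> 16) & 0xff;
        buf[6] = (block_size >> 8) & 0xff;
        buf[7] = block_size & 0xff;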
 
-static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        unsigned char *rbuf;
@@ -85,28 +85,29 @@ static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
        buf[5] = (blocks >> 16) & 0xff;
        buf[6] = (blocks >> 8) & 0xff;
        buf[7] = blocks & 0xff;
-       buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
-       buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
-       buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
-       buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+       buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
+       buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
+       buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
+       buf[11] = dev->dev_attrib.block_size & 0xff;
        /*
         * Set Thin Provisioning Enable bit following sbc3r22 in section
         * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
         */
-       if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+       if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
                buf[14] = 0x80;
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (rbuf) {
-               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-               transport_kunmap_data_sg(cmd);
-       }
+       if (!rbuf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+       transport_kunmap_data_sg(cmd);
 
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
 
-int spc_get_write_same_sectors(struct se_cmd *cmd)
+sector_t spc_get_write_same_sectors(struct se_cmd *cmd)
 {
        u32 num_blocks;
 
@@ -129,13 +130,8 @@ int spc_get_write_same_sectors(struct se_cmd *cmd)
 }
 EXPORT_SYMBOL(spc_get_write_same_sectors);
 
-static int sbc_emulate_verify(struct se_cmd *cmd)
-{
-       target_complete_cmd(cmd, GOOD);
-       return 0;
-}
-
-static int sbc_emulate_noop(struct se_cmd *cmd)
+static sense_reason_t
+sbc_emulate_noop(struct se_cmd *cmd)
 {
        target_complete_cmd(cmd, GOOD);
        return 0;
@@ -143,7 +139,7 @@ static int sbc_emulate_noop(struct se_cmd *cmd)
 
 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
 {
-       return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+       return cmd->se_dev->dev_attrib.block_size * sectors;
 }
 
 static int sbc_check_valid_sectors(struct se_cmd *cmd)
@@ -152,7 +148,7 @@ static int sbc_check_valid_sectors(struct se_cmd *cmd)
        unsigned long long end_lba;
        u32 sectors;
 
-       sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+       sectors = cmd->data_length / dev->dev_attrib.block_size;
        end_lba = dev->transport->get_blocks(dev) + 1;
 
        if (cmd->t_task_lba + sectors > end_lba) {
@@ -236,26 +232,37 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
        return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
 }
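
A quick worked example for the helper above (assumed CDB words, per the variable-length CDB layout): with high word __v1 = 0x00000001 and low word __v2 = 0, the composed LBA is 0x0000000100000000.

        u32 __v1 = 0x00000001;                  /* high 32 bits of the LBA */
        u32 __v2 = 0x00000000;                  /* low 32 bits of the LBA */
        unsigned long long lba = ((unsigned long long)__v2) |
                                 (unsigned long long)__v1 << 32;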
 
-static int sbc_write_same_supported(struct se_device *dev,
-               unsigned char *flags)
+static sense_reason_t
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+       unsigned int sectors = spc_get_write_same_sectors(cmd);
+
        if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
                        " bits not supported for Block Discard"
                        " Emulation\n");
-               return -ENOSYS;
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
+       }
+       if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
+               pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
+                       sectors, cmd->se_dev->dev_attrib.max_write_same_len);
+               return TCM_INVALID_CDB_FIELD;
        }
-
        /*
-        * Currently for the emulated case we only accept
-        * tpws with the UNMAP=1 bit set.
+        * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
+        * translated into block discard requests within backend code.
         */
-       if (!(flags[0] & 0x08)) {
-               pr_err("WRITE_SAME w/o UNMAP bit not"
-                       " supported for Block Discard Emulation\n");
-               return -ENOSYS;
+       if (flags[0] & 0x08) {
+               if (!ops->execute_write_same_unmap)
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+
+               cmd->execute_cmd = ops->execute_write_same_unmap;
+               return 0;
        }
+       if (!ops->execute_write_same)
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+       cmd->execute_cmd = ops->execute_write_same;
        return 0;
 }
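
A small sketch of how the flag byte passed to sbc_setup_write_same() drives the dispatch above (flag bits per sbc3; the variables are illustrative):

        unsigned char flags0 = 0x08;    /* UNMAP=1, PBDATA=0, LBDATA=0 */
        bool pbdata = flags0 & 0x04;    /* set: rejected as unsupported */
        bool lbdata = flags0 & 0x02;    /* set: rejected likewise */
        bool unmap  = flags0 & 0x08;    /* set: route to ops->execute_write_same_unmap */
        /* all clear: plain WRITE_SAME via ops->execute_write_same */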
 
@@ -313,14 +320,14 @@ out:
        kfree(buf);
 }
 
-int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+sense_reason_t
+sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
 {
-       struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned int size;
        u32 sectors = 0;
-       int ret;
+       sense_reason_t ret;
 
        switch (cdb[0]) {
        case READ_6:
@@ -379,9 +386,9 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                cmd->execute_cmd = ops->execute_rw;
                break;
        case XDWRITEREAD_10:
-               if ((cmd->data_direction != DMA_TO_DEVICE) ||
+               if (cmd->data_direction != DMA_TO_DEVICE ||
                    !(cmd->se_cmd_flags & SCF_BIDI))
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                sectors = transport_get_sectors_10(cdb);
 
                cmd->t_task_lba = transport_lba_32(cdb);
@@ -419,27 +426,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                                cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
-                       if (!ops->execute_write_same)
-                               goto out_unsupported_cdb;
-
                        sectors = transport_get_sectors_32(cdb);
                        if (!sectors) {
                                pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
                                       " supported\n");
-                               goto out_invalid_cdb_field;
+                               return TCM_INVALID_CDB_FIELD;
                        }
 
                        size = sbc_get_size(cmd, 1);
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 
-                       if (sbc_write_same_supported(dev, &cdb[10]) < 0)
-                               goto out_unsupported_cdb;
-                       cmd->execute_cmd = ops->execute_write_same;
+                       ret = sbc_setup_write_same(cmd, &cdb[10], ops);
+                       if (ret)
+                               return ret;
                        break;
                default:
                        pr_err("VARIABLE_LENGTH_CMD service action"
                                " 0x%04x not supported\n", service_action);
-                       goto out_unsupported_cdb;
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
                }
                break;
        }
@@ -455,7 +459,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                default:
                        pr_err("Unsupported SA: 0x%02x\n",
                                cmd->t_task_cdb[1] & 0x1f);
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
                size = (cdb[10] << 24) | (cdb[11] << 16) |
                       (cdb[12] << 8) | cdb[13];
@@ -463,7 +467,7 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                if (!ops->execute_sync_cache)
-                       goto out_unsupported_cdb;
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                /*
                 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
@@ -484,42 +488,36 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                 */
                if (cmd->t_task_lba || sectors) {
                        if (sbc_check_valid_sectors(cmd) < 0)
-                               goto out_invalid_cdb_field;
+                               return TCM_INVALID_CDB_FIELD;
                }
                cmd->execute_cmd = ops->execute_sync_cache;
                break;
        case UNMAP:
                if (!ops->execute_unmap)
-                       goto out_unsupported_cdb;
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = ops->execute_unmap;
                break;
        case WRITE_SAME_16:
-               if (!ops->execute_write_same)
-                       goto out_unsupported_cdb;
-
                sectors = transport_get_sectors_16(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
 
                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
 
-               if (sbc_write_same_supported(dev, &cdb[1]) < 0)
-                       goto out_unsupported_cdb;
-               cmd->execute_cmd = ops->execute_write_same;
+               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               if (ret)
+                       return ret;
                break;
        case WRITE_SAME:
-               if (!ops->execute_write_same)
-                       goto out_unsupported_cdb;
-
                sectors = transport_get_sectors_10(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
 
                size = sbc_get_size(cmd, 1);
@@ -529,13 +527,13 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
-               if (sbc_write_same_supported(dev, &cdb[1]) < 0)
-                       goto out_unsupported_cdb;
-               cmd->execute_cmd = ops->execute_write_same;
+               ret = sbc_setup_write_same(cmd, &cdb[1], ops);
+               if (ret)
+                       return ret;
                break;
        case VERIFY:
                size = 0;
-               cmd->execute_cmd = sbc_emulate_verify;
+               cmd->execute_cmd = sbc_emulate_noop;
                break;
        case REZERO_UNIT:
        case SEEK_6:
@@ -557,24 +555,24 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
 
        /* reject any command that we don't have a handler for */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
-               goto out_unsupported_cdb;
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
 
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
 
-               if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+               if (sectors > dev->dev_attrib.fabric_max_sectors) {
                        printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
                                " big sectors %u exceeds fabric_max_sectors:"
                                " %u\n", cdb[0], sectors,
-                               su_dev->se_dev_attrib.fabric_max_sectors);
-                       goto out_invalid_cdb_field;
+                               dev->dev_attrib.fabric_max_sectors);
+                       return TCM_INVALID_CDB_FIELD;
                }
-               if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+               if (sectors > dev->dev_attrib.hw_max_sectors) {
                        printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
                                " big sectors %u exceeds backend hw_max_sectors:"
                                " %u\n", cdb[0], sectors,
-                               su_dev->se_dev_attrib.hw_max_sectors);
-                       goto out_invalid_cdb_field;
+                               dev->dev_attrib.hw_max_sectors);
+                       return TCM_INVALID_CDB_FIELD;
                }
 
                end_lba = dev->transport->get_blocks(dev) + 1;
@@ -582,25 +580,18 @@ int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
                        pr_err("cmd exceeds last lba %llu "
                                "(lba %llu, sectors %u)\n",
                                end_lba, cmd->t_task_lba, sectors);
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
 
                size = sbc_get_size(cmd, sectors);
        }
 
-       ret = target_cmd_size_check(cmd, size);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-
-out_unsupported_cdb:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-       return -EINVAL;
-out_invalid_cdb_field:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-       return -EINVAL;
+       return target_cmd_size_check(cmd, size);
 }
 EXPORT_SYMBOL(sbc_parse_cdb);
+
+u32 sbc_get_device_type(struct se_device *dev)
+{
+       return TYPE_DISK;
+}
+EXPORT_SYMBOL(sbc_get_device_type);
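
After this conversion, sbc_parse_cdb() no longer sets SCF_SCSI_CDB_EXCEPTION and scsi_sense_reason on the command; it simply returns a sense_reason_t and leaves building the CHECK CONDITION to the caller. A minimal sketch of the idea (the numeric values are illustrative; the kernel defines these as an enum, and the ASC strings follow SPC):

    #include <stdio.h>

    typedef int sense_reason_t;             /* illustrative only */
    #define TCM_UNSUPPORTED_SCSI_OPCODE 1
    #define TCM_INVALID_CDB_FIELD       2

    static const char *sense_name(sense_reason_t r)
    {
        switch (r) {
        case TCM_UNSUPPORTED_SCSI_OPCODE:
            return "INVALID COMMAND OPERATION CODE";
        case TCM_INVALID_CDB_FIELD:
            return "INVALID FIELD IN CDB";
        default:
            return "GOOD";
        }
    }

    int main(void)
    {
        printf("%s\n", sense_name(TCM_INVALID_CDB_FIELD));
        return 0;
    }
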
index 6fd434d..84f9e96 100644
@@ -1,10 +1,7 @@
 /*
  * SCSI Primary Commands (SPC) parsing and emulation.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -69,7 +66,8 @@ static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
        spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 }
 
-static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static sense_reason_t
+spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
 {
        struct se_lun *lun = cmd->se_lun;
        struct se_device *dev = cmd->se_dev;
@@ -78,7 +76,7 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
        if (dev->transport->get_device_type(dev) == TYPE_TAPE)
                buf[1] = 0x80;
 
-       buf[2] = dev->transport->get_device_rev(dev);
+       buf[2] = 0x05; /* SPC-3 */
 
        /*
         * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
@@ -95,34 +93,32 @@ static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
        /*
         * Enable SCCS and TPGS fields for Emulated ALUA
         */
-       if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
-               spc_fill_alua_data(lun->lun_sep, buf);
+       spc_fill_alua_data(lun->lun_sep, buf);
 
        buf[7] = 0x2; /* CmdQue=1 */
 
        snprintf(&buf[8], 8, "LIO-ORG");
-       snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
-       snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
+       snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
+       snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
        buf[4] = 31; /* Set additional length to 31 */
 
        return 0;
 }
 
 /* unit serial number */
-static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
        u16 len = 0;
 
-       if (dev->se_sub_dev->su_dev_flags &
-                       SDF_EMULATED_VPD_UNIT_SERIAL) {
+       if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
                u32 unit_serial_len;
 
-               unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
+               unit_serial_len = strlen(dev->t10_wwn.unit_serial);
                unit_serial_len++; /* For NULL Terminator */
 
-               len += sprintf(&buf[4], "%s",
-                       dev->se_sub_dev->t10_wwn.unit_serial);
+               len += sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
                len++; /* Extra Byte for NULL Terminator */
                buf[3] = len;
        }
@@ -132,7 +128,7 @@ static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
 static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
                unsigned char *buf)
 {
-       unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
+       unsigned char *p = &dev->t10_wwn.unit_serial[0];
        int cnt;
        bool next = true;
 
@@ -164,7 +160,8 @@ static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
  * Device identification VPD, for a complete list of
  * DESIGNATOR TYPEs see spc4r17 Table 459.
  */
-static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_lun *lun = cmd->se_lun;
@@ -173,7 +170,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
        struct t10_alua_lu_gp_member *lu_gp_mem;
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-       unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
+       unsigned char *prod = &dev->t10_wwn.model[0];
        u32 prod_len;
        u32 unit_serial_len, off = 0;
        u16 len = 0, id_len;
@@ -188,7 +185,7 @@ static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
         * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
         * value in order to return the NAA id.
         */
-       if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
+       if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
                goto check_t10_vend_desc;
 
        /* CODE SET == Binary */
@@ -236,14 +233,12 @@ check_t10_vend_desc:
        prod_len += strlen(prod);
        prod_len++; /* For : */
 
-       if (dev->se_sub_dev->su_dev_flags &
-                       SDF_EMULATED_VPD_UNIT_SERIAL) {
-               unit_serial_len =
-                       strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+       if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
+               unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
                unit_serial_len++; /* For NULL Terminator */
 
                id_len += sprintf(&buf[off+12], "%s:%s", prod,
-                               &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+                               &dev->t10_wwn.unit_serial[0]);
        }
        buf[off] = 0x2; /* ASCII */
        buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -298,10 +293,6 @@ check_t10_vend_desc:
                 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
                 * section 7.5.1 Table 362
                 */
-               if (dev->se_sub_dev->t10_alua.alua_type !=
-                               SPC3_ALUA_EMULATED)
-                       goto check_scsi_name;
-
                tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
                if (!tg_pt_gp_mem)
                        goto check_lu_gp;
@@ -415,20 +406,22 @@ check_scsi_name:
 }
 
 /* Extended INQUIRY Data VPD Page */
-static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
        buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
        /* If WriteCache emulation is enabled, set V_SUP */
-       if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+       if (cmd->se_dev->dev_attrib.emulate_write_cache > 0)
                buf[6] = 0x01;
        return 0;
 }
 
 /* Block Limits VPD page */
-static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
        u32 max_sectors;
@@ -439,7 +432,7 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         * emulate_tpu=1 or emulate_tpws=1 we will expect a
         * different page length for Thin Provisioning.
         */
-       if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+       if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
                have_tp = 1;
 
        buf[0] = dev->transport->get_device_type(dev);
@@ -456,62 +449,70 @@ static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->se_sub_dev->se_dev_attrib.fabric_max_sectors,
-                         dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
+                         dev->dev_attrib.hw_max_sectors);
        put_unaligned_be32(max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
         */
-       put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
+       put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
        /*
         * Exit now if we don't support TP.
         */
        if (!have_tp)
-               return 0;
+               goto max_write_same;
 
        /*
         * Set MAXIMUM UNMAP LBA COUNT
         */
-       put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
+       put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);
 
        /*
         * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
         */
-       put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
+       put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
                           &buf[24]);
 
        /*
         * Set OPTIMAL UNMAP GRANULARITY
         */
-       put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
+       put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);
 
        /*
         * UNMAP GRANULARITY ALIGNMENT
         */
-       put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
+       put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
                           &buf[32]);
-       if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
+       if (dev->dev_attrib.unmap_granularity_alignment != 0)
                buf[32] |= 0x80; /* Set the UGAVALID bit */
 
+       /*
+        * MAXIMUM WRITE SAME LENGTH
+        */
+max_write_same:
+       put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);
+
        return 0;
 }
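
The Block Limits page is serialized with big-endian stores at fixed byte offsets, and this patch adds MAXIMUM WRITE SAME LENGTH at byte 36. A standalone sketch of that layout, with toy helpers standing in for put_unaligned_be32()/put_unaligned_be64() and hypothetical limit values:

    #include <stdint.h>
    #include <stdio.h>

    static void put_be32(uint32_t v, unsigned char *p)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void put_be64(uint64_t v, unsigned char *p)
    {
        put_be32(v >> 32, p);
        put_be32((uint32_t)v, p + 4);
    }

    int main(void)
    {
        unsigned char vpd[64] = { 0 };

        put_be32(16384, &vpd[8]);       /* MAXIMUM TRANSFER LENGTH */
        put_be32(1024, &vpd[12]);       /* OPTIMAL TRANSFER LENGTH */
        put_be64(4096, &vpd[36]);       /* MAXIMUM WRITE SAME LENGTH */
        printf("bytes 36..43: %02x %02x %02x %02x %02x %02x %02x %02x\n",
               vpd[36], vpd[37], vpd[38], vpd[39],
               vpd[40], vpd[41], vpd[42], vpd[43]);
        return 0;
    }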
 
 /* Block Device Characteristics VPD page */
-static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
 
        buf[0] = dev->transport->get_device_type(dev);
        buf[3] = 0x3c;
-       buf[5] = dev->se_sub_dev->se_dev_attrib.is_nonrot ? 1 : 0;
+       buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;
 
        return 0;
 }
 
 /* Thin Provisioning VPD */
-static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
 
@@ -546,7 +547,7 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
         * the UNMAP command (see 5.25). A TPU bit set to zero indicates
         * that the device server does not support the UNMAP command.
         */
-       if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
+       if (dev->dev_attrib.emulate_tpu != 0)
                buf[5] = 0x80;
 
        /*
@@ -555,17 +556,18 @@ static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
         * A TPWS bit set to zero indicates that the device server does not
         * support the use of the WRITE SAME (16) command to unmap LBAs.
         */
-       if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
+       if (dev->dev_attrib.emulate_tpws != 0)
                buf[5] |= 0x40;
 
        return 0;
 }
 
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
 
 static struct {
        uint8_t         page;
-       int             (*emulate)(struct se_cmd *, unsigned char *);
+       sense_reason_t  (*emulate)(struct se_cmd *, unsigned char *);
 } evpd_handlers[] = {
        { .page = 0x00, .emulate = spc_emulate_evpd_00 },
        { .page = 0x80, .emulate = spc_emulate_evpd_80 },
@@ -577,7 +579,8 @@ static struct {
 };
 
 /* supported vital product data pages */
-static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static sense_reason_t
+spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
 {
        int p;
 
@@ -586,8 +589,7 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
         * Registered Extended LUN WWN has been set via ConfigFS
         * during device creation/restart.
         */
-       if (cmd->se_dev->se_sub_dev->su_dev_flags &
-                       SDF_EMULATED_VPD_UNIT_SERIAL) {
+       if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
                buf[3] = ARRAY_SIZE(evpd_handlers);
                for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
                        buf[p + 4] = evpd_handlers[p].page;
@@ -596,14 +598,16 @@ static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
        return 0;
 }
 
-static int spc_emulate_inquiry(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_inquiry(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
        unsigned char *rbuf;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned char buf[SE_INQUIRY_BUF];
-       int p, ret;
+       sense_reason_t ret;
+       int p;
 
        memset(buf, 0, SE_INQUIRY_BUF);
 
@@ -616,8 +620,7 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
                if (cdb[2]) {
                        pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
                               cdb[2]);
-                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-                       ret = -EINVAL;
+                       ret = TCM_INVALID_CDB_FIELD;
                        goto out;
                }
 
@@ -634,33 +637,43 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
-       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-       ret = -EINVAL;
+       ret = TCM_INVALID_CDB_FIELD;
 
 out:
        rbuf = transport_kmap_data_sg(cmd);
-       if (rbuf) {
-               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-               transport_kunmap_data_sg(cmd);
-       }
+       if (!rbuf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+       transport_kunmap_data_sg(cmd);
 
        if (!ret)
                target_complete_cmd(cmd, GOOD);
        return ret;
 }
 
-static int spc_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
 {
        p[0] = 0x01;
        p[1] = 0x0a;
 
+       /* No changeable values for now */
+       if (pc == 1)
+               goto out;
+
+out:
        return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
 {
        p[0] = 0x0a;
        p[1] = 0x0a;
+
+       /* No changeable values for now */
+       if (pc == 1)
+               goto out;
+
        p[2] = 2;
        /*
         * From spc4r23, 7.4.7 Control mode page
@@ -690,7 +703,7 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
         * command sequence order shall be explicitly handled by the application client
         * through the selection of appropriate commands and task attributes.
         */
-       p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+       p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
        /*
         * From spc4r17, section 7.4.6 Control mode Page
         *
@@ -720,8 +733,8 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
         * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
         * to the number of commands completed with one of those status codes.
         */
-       p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
-              (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+       p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+              (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
        /*
         * From spc4r17, section 7.4.6 Control mode Page
         *
@@ -734,25 +747,56 @@ static int spc_modesense_control(struct se_device *dev, unsigned char *p)
         * which the command was received shall be completed with TASK ABORTED
         * status (see SAM-4).
         */
-       p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
+       p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
        p[8] = 0xff;
        p[9] = 0xff;
        p[11] = 30;
 
+out:
        return 12;
 }
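
Each mode page handler now receives the PAGE CONTROL value from the CDB. With pc == 1 the initiator is asking for the changeable-values mask, which is all zeroes here because nothing is settable yet. A standalone sketch of the convention (the page contents are hypothetical):

    #include <stdio.h>

    /* Hypothetical mode page: PC=0 returns current values, PC=1
     * (changeable) leaves the body zeroed because nothing is settable. */
    static int mode_page(unsigned char pc, unsigned char *p)
    {
        p[0] = 0x0a;                    /* page code */
        p[1] = 0x0a;                    /* page length */
        if (pc == 1)
            return 12;
        p[2] = 2;                       /* a current-values byte */
        return 12;
    }

    int main(void)
    {
        unsigned char cur[12] = { 0 }, chg[12] = { 0 };

        mode_page(0, cur);
        mode_page(1, chg);
        printf("current byte 2: %02x, changeable byte 2: %02x\n",
               cur[2], chg[2]);
        return 0;
    }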
 
-static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
 {
        p[0] = 0x08;
        p[1] = 0x12;
-       if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
+
+       /* No changeable values for now */
+       if (pc == 1)
+               goto out;
+
+       if (dev->dev_attrib.emulate_write_cache > 0)
                p[2] = 0x04; /* Write Cache Enable */
        p[12] = 0x20; /* Disabled Read Ahead */
 
+out:
        return 20;
 }
 
+static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+{
+       p[0] = 0x1c;
+       p[1] = 0x0a;
+
+       /* No changeable values for now */
+       if (pc == 1)
+               goto out;
+
+out:
+       return 12;
+}
+
+static struct {
+       uint8_t         page;
+       uint8_t         subpage;
+       int             (*emulate)(struct se_device *, u8, unsigned char *);
+} modesense_handlers[] = {
+       { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
+       { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
+       { .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
+       { .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
+};
+
 static void spc_modesense_write_protect(unsigned char *buf, int type)
 {
        /*
@@ -779,82 +823,224 @@ static void spc_modesense_dpofua(unsigned char *buf, int type)
        }
 }
 
-static int spc_emulate_modesense(struct se_cmd *cmd)
+static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+       *buf++ = 8;
+       put_unaligned_be32(min(blocks, 0xffffffffull), buf);
+       buf += 4;
+       put_unaligned_be32(block_size, buf);
+       return 9;
+}
+
+static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
+{
+       if (blocks <= 0xffffffff)
+               return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;
+
+       *buf++ = 1;             /* LONGLBA */
+       buf += 2;
+       *buf++ = 16;
+       put_unaligned_be64(blocks, buf);
+       buf += 12;
+       put_unaligned_be32(block_size, buf);
+
+       return 17;
+}
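
spc_modesense_blockdesc() emits the classic 8-byte descriptor (9 bytes including the leading BLOCK DESCRIPTOR LENGTH byte), and the long form falls back to it unless the block count no longer fits in 32 bits, in which case it switches to the 16-byte LONGLBA layout. A standalone sketch of that decision (the device sizes are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t blocks[] = { 1ull << 20, 1ull << 40 }; /* hypothetical sizes */
        int i;

        for (i = 0; i < 2; i++)
            printf("%llu blocks -> %s descriptor\n",
                   (unsigned long long)blocks[i],
                   blocks[i] <= 0xffffffffull ? "8-byte" : "16-byte LONGLBA");
        return 0;
    }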
+
+static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        char *cdb = cmd->t_task_cdb;
-       unsigned char *rbuf;
+       unsigned char *buf, *map_buf;
        int type = dev->transport->get_device_type(dev);
        int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
-       u32 offset = ten ? 8 : 4;
+       bool dbd = !!(cdb[1] & 0x08);
+       bool llba = ten ? !!(cdb[1] & 0x10) : false;
+       u8 pc = cdb[2] >> 6;
+       u8 page = cdb[2] & 0x3f;
+       u8 subpage = cdb[3];
        int length = 0;
-       unsigned char buf[SE_MODE_PAGE_BUF];
-
-       memset(buf, 0, SE_MODE_PAGE_BUF);
+       int ret;
+       int i;
 
-       switch (cdb[2] & 0x3f) {
-       case 0x01:
-               length = spc_modesense_rwrecovery(&buf[offset]);
-               break;
-       case 0x08:
-               length = spc_modesense_caching(dev, &buf[offset]);
-               break;
-       case 0x0a:
-               length = spc_modesense_control(dev, &buf[offset]);
-               break;
-       case 0x3f:
-               length = spc_modesense_rwrecovery(&buf[offset]);
-               length += spc_modesense_caching(dev, &buf[offset+length]);
-               length += spc_modesense_control(dev, &buf[offset+length]);
-               break;
-       default:
-               pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
-                      cdb[2] & 0x3f, cdb[3]);
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               return -EINVAL;
+       map_buf = transport_kmap_data_sg(cmd);
+       if (!map_buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       /*
+        * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+        * know we actually allocated a full page.  Otherwise, if the
+        * data buffer is too small, allocate a temporary buffer so we
+        * don't have to worry about overruns in all our MODE SENSE
+        * emulation handling.
+        */
+       if (cmd->data_length < SE_MODE_PAGE_BUF &&
+           (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+               buf = kzalloc(SE_MODE_PAGE_BUF, GFP_KERNEL);
+               if (!buf) {
+                       transport_kunmap_data_sg(cmd);
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
+       } else {
+               buf = map_buf;
        }
-       offset += length;
-
-       if (ten) {
-               offset -= 2;
-               buf[0] = (offset >> 8) & 0xff;
-               buf[1] = offset & 0xff;
-               offset += 2;
-
-               if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-                   (cmd->se_deve &&
-                   (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
-                       spc_modesense_write_protect(&buf[3], type);
-
-               if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-                   (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
-                       spc_modesense_dpofua(&buf[3], type);
+       /*
+        * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
+        * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
+        */
+       length = ten ? 3 : 2;
+
+       /* DEVICE-SPECIFIC PARAMETER */
+       if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+           (cmd->se_deve &&
+            (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+               spc_modesense_write_protect(&buf[length], type);
+
+       if ((dev->dev_attrib.emulate_write_cache > 0) &&
+           (dev->dev_attrib.emulate_fua_write > 0))
+               spc_modesense_dpofua(&buf[length], type);
+
+       ++length;
+
+       /* BLOCK DESCRIPTOR */
+
+       /*
+        * For now we only include a block descriptor for disk (SBC)
+        * devices; other command sets use a slightly different format.
+        */
+       if (!dbd && type == TYPE_DISK) {
+               u64 blocks = dev->transport->get_blocks(dev);
+               u32 block_size = dev->dev_attrib.block_size;
+
+               if (ten) {
+                       if (llba) {
+                               length += spc_modesense_long_blockdesc(&buf[length],
+                                                                      blocks, block_size);
+                       } else {
+                               length += 3;
+                               length += spc_modesense_blockdesc(&buf[length],
+                                                                 blocks, block_size);
+                       }
+               } else {
+                       length += spc_modesense_blockdesc(&buf[length], blocks,
+                                                         block_size);
+               }
        } else {
-               offset -= 1;
-               buf[0] = offset & 0xff;
-               offset += 1;
-
-               if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-                   (cmd->se_deve &&
-                   (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
-                       spc_modesense_write_protect(&buf[2], type);
-
-               if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
-                   (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
-                       spc_modesense_dpofua(&buf[2], type);
+               if (ten)
+                       length += 4;
+               else
+                       length += 1;
        }
 
-       rbuf = transport_kmap_data_sg(cmd);
-       if (rbuf) {
-               memcpy(rbuf, buf, min(offset, cmd->data_length));
-               transport_kunmap_data_sg(cmd);
+       if (page == 0x3f) {
+               if (subpage != 0x00 && subpage != 0xff) {
+                       pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
+                       if (buf != map_buf)
+                               kfree(buf);
+                       transport_kunmap_data_sg(cmd);
+                       return TCM_INVALID_CDB_FIELD;
+               }
+
+               for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
+                       /*
+                        * Tricky way to say all subpage 00h for
+                        * subpage==0, all subpages for subpage==0xff
+                        * (and we just checked above that those are
+                        * the only two possibilities).
+                        */
+                       if ((modesense_handlers[i].subpage & ~subpage) == 0) {
+                               ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+                               if (!ten && length + ret >= 255)
+                                       break;
+                               length += ret;
+                       }
+               }
+
+               goto set_length;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+               if (modesense_handlers[i].page == page &&
+                   modesense_handlers[i].subpage == subpage) {
+                       length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+                       goto set_length;
+               }
+
+       /*
+        * We don't intend to implement:
+        *  - obsolete page 03h "format parameters" (checked by Solaris)
+        */
+       if (page != 0x03)
+               pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+                      page, subpage);
+
+       if (buf != map_buf)
+               kfree(buf);
+       transport_kunmap_data_sg(cmd);
+       return TCM_UNKNOWN_MODE_PAGE;
+
+set_length:
+       if (ten)
+               put_unaligned_be16(length - 2, buf);
+       else
+               buf[0] = length - 1;
+
+       if (buf != map_buf) {
+               memcpy(map_buf, buf, cmd->data_length);
+               kfree(buf);
        }
 
+       transport_kunmap_data_sg(cmd);
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
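
The page 3Fh loop above matches handlers with (handler_subpage & ~requested_subpage) == 0, which selects only subpage-00h entries when the initiator asks for subpage 00h and every entry when it asks for FFh. A standalone demonstration (the nonzero handler subpages are hypothetical; the table currently registers only subpage 0x00):

    #include <stdio.h>

    int main(void)
    {
        unsigned char handler[] = { 0x00, 0x01, 0xff };
        unsigned char request[] = { 0x00, 0xff };
        int h, r;

        for (r = 0; r < 2; r++)
            for (h = 0; h < 3; h++)
                printf("request %02x, handler %02x: %s\n",
                       request[r], handler[h],
                       (handler[h] & ~request[r]) == 0 ? "match" : "skip");
        return 0;
    }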
 
-static int spc_emulate_request_sense(struct se_cmd *cmd)
+static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
+{
+       struct se_device *dev = cmd->se_dev;
+       char *cdb = cmd->t_task_cdb;
+       bool ten = cdb[0] == MODE_SELECT_10;
+       int off = ten ? 8 : 4;
+       bool pf = !!(cdb[1] & 0x10);
+       u8 page, subpage;
+       unsigned char *buf;
+       unsigned char tbuf[SE_MODE_PAGE_BUF];
+       int length;
+       int ret = 0;
+       int i;
+
+       buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       if (!pf) {
+               ret = TCM_INVALID_CDB_FIELD;
+               goto out;
+       }
+
+       page = buf[off] & 0x3f;
+       subpage = buf[off] & 0x40 ? buf[off + 1] : 0;
+
+       for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
+               if (modesense_handlers[i].page == page &&
+                   modesense_handlers[i].subpage == subpage) {
+                       memset(tbuf, 0, SE_MODE_PAGE_BUF);
+                       length = modesense_handlers[i].emulate(dev, 0, tbuf);
+                       goto check_contents;
+               }
+
+       ret = TCM_UNKNOWN_MODE_PAGE;
+       goto out;
+
+check_contents:
+       if (memcmp(buf + off, tbuf, length))
+               ret = TCM_INVALID_PARAMETER_LIST;
+
+out:
+       transport_kunmap_data_sg(cmd);
+
+       if (!ret)
+               target_complete_cmd(cmd, GOOD);
+       return ret;
+}
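
Because none of the emulated pages has changeable fields yet, spc_emulate_modeselect() accepts a MODE SELECT payload only if it is byte-identical to the current page image, and rejects everything else with an invalid parameter list. A standalone sketch (the page bytes are hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char current[4] = { 0x0a, 0x0a, 0x02, 0x10 }; /* hypothetical page */
        unsigned char payload[4] = { 0x0a, 0x0a, 0x02, 0x00 }; /* one byte differs */

        printf("%s\n", memcmp(payload, current, sizeof(current)) ?
               "reject: INVALID PARAMETER LIST" : "accept");
        return 0;
    }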
+
+static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
 {
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned char *rbuf;
@@ -866,19 +1052,14 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -ENOSYS;
+               return TCM_INVALID_CDB_FIELD;
        }
 
        rbuf = transport_kmap_data_sg(cmd);
-       if (cmd->scsi_sense_reason != 0) {
-               /*
-                * Out of memory.  We will fail with CHECK CONDITION, so
-                * we must not clear the unit attention condition.
-                */
-               target_complete_cmd(cmd, CHECK_CONDITION);
-               return 0;
-       } else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
+       if (!rbuf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
                /*
                 * CURRENT ERROR, UNIT ATTENTION
                 */
@@ -905,33 +1086,97 @@ static int spc_emulate_request_sense(struct se_cmd *cmd)
                buf[7] = 0x0A;
        }
 
-       if (rbuf) {
-               memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
-               transport_kunmap_data_sg(cmd);
+       memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
+       transport_kunmap_data_sg(cmd);
+
+       target_complete_cmd(cmd, GOOD);
+       return 0;
+}
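
The sense data assembled here follows the SPC fixed format: response code 70h, the sense key in byte 2, additional sense length 0Ah in byte 7, and ASC/ASCQ in bytes 12 and 13. A standalone sketch with a hypothetical UNIT ATTENTION payload:

    #include <stdio.h>

    int main(void)
    {
        unsigned char buf[18] = { 0 };

        buf[0] = 0x70;                  /* current error, fixed format */
        buf[2] = 0x06;                  /* sense key: UNIT ATTENTION */
        buf[7] = 0x0a;                  /* additional sense length */
        buf[12] = 0x29;                 /* ASC (hypothetical: reset occurred) */
        buf[13] = 0x00;                 /* ASCQ */
        printf("%02x .. key %02x asc/ascq %02x/%02x\n",
               buf[0], buf[2], buf[12], buf[13]);
        return 0;
    }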
+
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
+{
+       struct se_dev_entry *deve;
+       struct se_session *sess = cmd->se_sess;
+       unsigned char *buf;
+       u32 lun_count = 0, offset = 8, i;
+
+       if (cmd->data_length < 16) {
+               pr_warn("REPORT LUNS allocation length %u too small\n",
+                       cmd->data_length);
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+       buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+       /*
+        * If no struct se_session pointer is present, this struct se_cmd is
+        * coming via a target_core_mod PASSTHROUGH op, and not through
+        * a $FABRIC_MOD.  In that case, report LUN=0 only.
+        */
+       if (!sess) {
+               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+               lun_count = 1;
+               goto done;
+       }
+
+       spin_lock_irq(&sess->se_node_acl->device_list_lock);
+       for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+               deve = sess->se_node_acl->device_list[i];
+               if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
+                       continue;
+               /*
+                * We determine the correct LUN LIST LENGTH even after the
+                * initial allocation length has been exceeded.
+                * See SPC2-R20 7.19.
+                */
+               lun_count++;
+               if ((offset + 8) > cmd->data_length)
+                       continue;
+
+               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+               offset += 8;
        }
+       spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+
+       /*
+        * See SPC3 r07, page 159.
+        */
+done:
+       lun_count *= 8;
+       buf[0] = ((lun_count >> 24) & 0xff);
+       buf[1] = ((lun_count >> 16) & 0xff);
+       buf[2] = ((lun_count >> 8) & 0xff);
+       buf[3] = (lun_count & 0xff);
+       transport_kunmap_data_sg(cmd);
 
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
+EXPORT_SYMBOL(spc_emulate_report_luns);
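
Per the SPC-2 reference in the comment above, the LUN LIST LENGTH header reports the size of the full list even when the allocation length truncates the entries actually returned, so an initiator can retry REPORT LUNS with a larger buffer. A standalone model of that accounting (the counts are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int lun_count = 10, data_length = 40;  /* hypothetical */
        unsigned int offset = 8, reported = 0, fitted = 0, i;

        for (i = 0; i < lun_count; i++) {
            reported++;                         /* always counted ... */
            if (offset + 8 > data_length)
                continue;                       /* ... even when it no longer fits */
            offset += 8;
            fitted++;
        }
        /* prints: header says 80 bytes, 4 entries returned */
        printf("header says %u bytes, %u entries returned\n",
               reported * 8, fitted);
        return 0;
    }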
 
-static int spc_emulate_testunitready(struct se_cmd *cmd)
+static sense_reason_t
+spc_emulate_testunitready(struct se_cmd *cmd)
 {
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
 
-int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
+sense_reason_t
+spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
 {
        struct se_device *dev = cmd->se_dev;
-       struct se_subsystem_dev *su_dev = dev->se_sub_dev;
        unsigned char *cdb = cmd->t_task_cdb;
 
        switch (cdb[0]) {
        case MODE_SELECT:
                *size = cdb[4];
+               cmd->execute_cmd = spc_emulate_modeselect;
                break;
        case MODE_SELECT_10:
                *size = (cdb[7] << 8) + cdb[8];
+               cmd->execute_cmd = spc_emulate_modeselect;
                break;
        case MODE_SENSE:
                *size = cdb[4];
@@ -946,14 +1191,12 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                *size = (cdb[7] << 8) + cdb[8];
                break;
        case PERSISTENT_RESERVE_IN:
-               if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-                       cmd->execute_cmd = target_scsi3_emulate_pr_in;
                *size = (cdb[7] << 8) + cdb[8];
+               cmd->execute_cmd = target_scsi3_emulate_pr_in;
                break;
        case PERSISTENT_RESERVE_OUT:
-               if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
-                       cmd->execute_cmd = target_scsi3_emulate_pr_out;
                *size = (cdb[7] << 8) + cdb[8];
+               cmd->execute_cmd = target_scsi3_emulate_pr_out;
                break;
        case RELEASE:
        case RELEASE_10:
@@ -962,8 +1205,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                else
                        *size = cmd->data_length;
 
-               if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-                       cmd->execute_cmd = target_scsi2_reservation_release;
+               cmd->execute_cmd = target_scsi2_reservation_release;
                break;
        case RESERVE:
        case RESERVE_10:
@@ -976,15 +1218,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                else
                        *size = cmd->data_length;
 
-               /*
-                * Setup the legacy emulated handler for SPC-2 and
-                * >= SPC-3 compatible reservation handling (CRH=1)
-                * Otherwise, we assume the underlying SCSI logic is
-                * is running in SPC_PASSTHROUGH, and wants reservations
-                * emulation disabled.
-                */
-               if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
-                       cmd->execute_cmd = target_scsi2_reservation_reserve;
+               cmd->execute_cmd = target_scsi2_reservation_reserve;
                break;
        case REQUEST_SENSE:
                *size = cdb[4];
@@ -997,8 +1231,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
                 * See spc4r17 section 5.3
                 */
-               if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-                       cmd->sam_task_attr = MSG_HEAD_TAG;
+               cmd->sam_task_attr = MSG_HEAD_TAG;
                cmd->execute_cmd = spc_emulate_inquiry;
                break;
        case SECURITY_PROTOCOL_IN:
@@ -1020,14 +1253,13 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
                break;
        case REPORT_LUNS:
-               cmd->execute_cmd = target_report_luns;
+               cmd->execute_cmd = spc_emulate_report_luns;
                *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
                /*
                 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
                 * See spc4r17 section 5.3
                 */
-               if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-                       cmd->sam_task_attr = MSG_HEAD_TAG;
+               cmd->sam_task_attr = MSG_HEAD_TAG;
                break;
        case TEST_UNIT_READY:
                cmd->execute_cmd = spc_emulate_testunitready;
@@ -1039,8 +1271,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                         * MAINTENANCE_IN from SCC-2
                         * Check for emulated MI_REPORT_TARGET_PGS
                         */
-                       if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
-                           su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+                       if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
                                cmd->execute_cmd =
                                        target_emulate_report_target_port_groups;
                        }
@@ -1058,8 +1289,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                         * MAINTENANCE_OUT from SCC-2
                         * Check for emulated MO_SET_TARGET_PGS.
                         */
-                       if (cdb[1] == MO_SET_TARGET_PGS &&
-                           su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+                       if (cdb[1] == MO_SET_TARGET_PGS) {
                                cmd->execute_cmd =
                                        target_emulate_set_target_port_groups;
                        }
@@ -1075,9 +1305,7 @@ int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
                        " 0x%02x, sending CHECK_CONDITION.\n",
                        cmd->se_tfo->get_fabric_name(), cdb[0]);
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               return -EINVAL;
+               return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
 
        return 0;
index cb6b003..d154ce7 100644
@@ -1,13 +1,10 @@
 /*******************************************************************************
  * Filename:  target_core_stat.c
  *
- * Copyright (c) 2011 Rising Tide Systems
- * Copyright (c) 2011 Linux-iSCSI.org
- *
  * Modern ConfigFS group context specific statistics based on original
  * target_core_mib.c code
  *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
+ * (c) Copyright 2006-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@linux-iscsi.org>
  *
@@ -80,13 +77,9 @@ static struct target_stat_scsi_dev_attribute                         \
 static ssize_t target_stat_scsi_dev_show_attr_inst(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_hba *hba = se_subdev->se_dev_hba;
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
+       struct se_hba *hba = dev->se_hba;
 
        return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
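
All of the attribute handlers in this file now recover the se_device directly via container_of() from the embedded se_dev_stat_grps, dropping the se_subsystem_dev indirection and the NULL check it required. A standalone model of the pattern (toy types, not the kernel structures):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct stat_grps { int unused; };

    struct toy_device {
        unsigned int dev_index;
        struct stat_grps grps;
    };

    int main(void)
    {
        struct toy_device d = { .dev_index = 7, .grps = { 0 } };
        struct stat_grps *g = &d.grps;
        struct toy_device *dev = container_of(g, struct toy_device, grps);

        printf("%u\n", dev->dev_index);     /* prints 7 */
        return 0;
    }
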
@@ -95,12 +88,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_dev_show_attr_indx(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -109,13 +98,6 @@ DEV_STAT_SCSI_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_dev_show_attr_role(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        return snprintf(page, PAGE_SIZE, "Target\n");
 }
 DEV_STAT_SCSI_DEV_ATTR_RO(role);
@@ -123,12 +105,8 @@ DEV_STAT_SCSI_DEV_ATTR_RO(role);
 static ssize_t target_stat_scsi_dev_show_attr_ports(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);
 }
@@ -176,13 +154,9 @@ static struct target_stat_scsi_tgt_dev_attribute                   \
 static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_hba *hba = se_subdev->se_dev_hba;
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
+       struct se_hba *hba = dev->se_hba;
 
        return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -191,12 +165,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(inst);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -205,13 +175,6 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(indx);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        return snprintf(page, PAGE_SIZE, "%u\n", LU_COUNT);
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
@@ -219,60 +182,27 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(num_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_status(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-       char status[16];
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
-       if (!dev)
-               return -ENODEV;
-
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
-               strcpy(status, "activated");
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
-               strcpy(status, "deactivated");
-               break;
-       case TRANSPORT_DEVICE_SHUTDOWN:
-               strcpy(status, "shutdown");
-               break;
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-               strcpy(status, "offline");
-               break;
-       default:
-               sprintf(status, "unknown(%d)", dev->dev_status);
-               break;
-       }
-
-       return snprintf(page, PAGE_SIZE, "%s\n", status);
+       if (dev->export_count)
+               return snprintf(page, PAGE_SIZE, "activated\n");
+       else
+               return snprintf(page, PAGE_SIZE, "deactivated\n");
 }
 DEV_STAT_SCSI_TGT_DEV_ATTR_RO(status);
 
 static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
        int non_accessible_lus;
 
-       if (!dev)
-               return -ENODEV;
-
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
+       if (dev->export_count)
                non_accessible_lus = 0;
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
-       case TRANSPORT_DEVICE_SHUTDOWN:
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-       default:
+       else
                non_accessible_lus = 1;
-               break;
-       }
 
        return snprintf(page, PAGE_SIZE, "%u\n", non_accessible_lus);
 }
@@ -281,12 +211,8 @@ DEV_STAT_SCSI_TGT_DEV_ATTR_RO(non_access_lus);
 static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
 }
@@ -335,13 +261,9 @@ static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_##_name = \
 static ssize_t target_stat_scsi_lu_show_attr_inst(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_hba *hba = se_subdev->se_dev_hba;
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
+       struct se_hba *hba = dev->se_hba;
 
        return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);
 }
@@ -350,12 +272,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(inst);
 static ssize_t target_stat_scsi_lu_show_attr_dev(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);
 }
@@ -364,13 +282,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev);
 static ssize_t target_stat_scsi_lu_show_attr_indx(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        return snprintf(page, PAGE_SIZE, "%u\n", SCSI_LU_INDEX);
 }
 DEV_STAT_SCSI_LU_ATTR_RO(indx);
@@ -378,12 +289,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(indx);
 static ssize_t target_stat_scsi_lu_show_attr_lun(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
        /* FIXME: scsiLuDefaultLun */
        return snprintf(page, PAGE_SIZE, "%llu\n", (unsigned long long)0);
 }
@@ -392,35 +297,28 @@ DEV_STAT_SCSI_LU_ATTR_RO(lun);
 static ssize_t target_stat_scsi_lu_show_attr_lu_name(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
-       if (!dev)
-               return -ENODEV;
        /* scsiLuWwnName */
        return snprintf(page, PAGE_SIZE, "%s\n",
-                       (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
-                       dev->se_sub_dev->t10_wwn.unit_serial : "None");
+                       (strlen(dev->t10_wwn.unit_serial)) ?
+                       dev->t10_wwn.unit_serial : "None");
 }
 DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
 
 static ssize_t target_stat_scsi_lu_show_attr_vend(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
        int i;
-       char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
-
-       if (!dev)
-               return -ENODEV;
+       char str[sizeof(dev->t10_wwn.vendor)+1];
 
        /* scsiLuVendorId */
-       for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
-               str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
-                       dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+       for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+               str[i] = ISPRINT(dev->t10_wwn.vendor[i]) ?
+                       dev->t10_wwn.vendor[i] : ' ';
        str[i] = '\0';
        return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -429,19 +327,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(vend);
 static ssize_t target_stat_scsi_lu_show_attr_prod(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
        int i;
-       char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
-
-       if (!dev)
-               return -ENODEV;
+       char str[sizeof(dev->t10_wwn.model)+1];
 
        /* scsiLuProductId */
-       for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
-               str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
-                       dev->se_sub_dev->t10_wwn.model[i] : ' ';
+       for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
+               str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
+                       dev->t10_wwn.model[i] : ' ';
        str[i] = '\0';
        return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -450,19 +344,15 @@ DEV_STAT_SCSI_LU_ATTR_RO(prod);
 static ssize_t target_stat_scsi_lu_show_attr_rev(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
        int i;
-       char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
-
-       if (!dev)
-               return -ENODEV;
+       char str[sizeof(dev->t10_wwn.revision)+1];
 
        /* scsiLuRevisionId */
-       for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
-               str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
-                       dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+       for (i = 0; i < sizeof(dev->t10_wwn.revision); i++)
+               str[i] = ISPRINT(dev->t10_wwn.revision[i]) ?
+                       dev->t10_wwn.revision[i] : ' ';
        str[i] = '\0';
        return snprintf(page, PAGE_SIZE, "%s\n", str);
 }
@@ -471,12 +361,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(rev);
 static ssize_t target_stat_scsi_lu_show_attr_dev_type(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuPeripheralType */
        return snprintf(page, PAGE_SIZE, "%u\n",
@@ -487,30 +373,18 @@ DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
 static ssize_t target_stat_scsi_lu_show_attr_status(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuStatus */
        return snprintf(page, PAGE_SIZE, "%s\n",
-               (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-               "available" : "notavailable");
+               (dev->export_count) ? "available" : "notavailable");
 }
 DEV_STAT_SCSI_LU_ATTR_RO(status);
 
 static ssize_t target_stat_scsi_lu_show_attr_state_bit(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        /* scsiLuState */
        return snprintf(page, PAGE_SIZE, "exposed\n");
 }
@@ -519,12 +393,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(state_bit);
 static ssize_t target_stat_scsi_lu_show_attr_num_cmds(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuNumCommands */
        return snprintf(page, PAGE_SIZE, "%llu\n",
@@ -535,12 +405,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(num_cmds);
 static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuReadMegaBytes */
        return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->read_bytes >> 20));
@@ -550,12 +416,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(read_mbytes);
 static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuWrittenMegaBytes */
        return snprintf(page, PAGE_SIZE, "%u\n", (u32)(dev->write_bytes >> 20));
@@ -565,12 +427,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(write_mbytes);
 static ssize_t target_stat_scsi_lu_show_attr_resets(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuInResets */
        return snprintf(page, PAGE_SIZE, "%u\n", dev->num_resets);
@@ -580,13 +438,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(resets);
 static ssize_t target_stat_scsi_lu_show_attr_full_stat(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        /* FIXME: scsiLuOutTaskSetFullStatus */
        return snprintf(page, PAGE_SIZE, "%u\n", 0);
 }
@@ -595,13 +446,6 @@ DEV_STAT_SCSI_LU_ATTR_RO(full_stat);
 static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
-
        /* FIXME: scsiLuHSInCommands */
        return snprintf(page, PAGE_SIZE, "%u\n", 0);
 }
@@ -610,12 +454,8 @@ DEV_STAT_SCSI_LU_ATTR_RO(hs_num_cmds);
 static ssize_t target_stat_scsi_lu_show_attr_creation_time(
        struct se_dev_stat_grps *sgrps, char *page)
 {
-       struct se_subsystem_dev *se_subdev = container_of(sgrps,
-                       struct se_subsystem_dev, dev_stat_grps);
-       struct se_device *dev = se_subdev->se_dev_ptr;
-
-       if (!dev)
-               return -ENODEV;
+       struct se_device *dev =
+               container_of(sgrps, struct se_device, dev_stat_grps);
 
        /* scsiLuCreationTime */
        return snprintf(page, PAGE_SIZE, "%u\n", (u32)(((u32)dev->creation_time -
@@ -662,20 +502,20 @@ static struct config_item_type target_stat_scsi_lu_cit = {
  * Called from target_core_configfs.c:target_core_make_subdev() to setup
  * the target statistics groups + configfs CITs located in target_core_stat.c
  */
-void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
+void target_stat_setup_dev_default_groups(struct se_device *dev)
 {
-       struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
+       struct config_group *dev_stat_grp = &dev->dev_stat_grps.stat_group;
 
-       config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
+       config_group_init_type_name(&dev->dev_stat_grps.scsi_dev_group,
                        "scsi_dev", &target_stat_scsi_dev_cit);
-       config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
+       config_group_init_type_name(&dev->dev_stat_grps.scsi_tgt_dev_group,
                        "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
-       config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
+       config_group_init_type_name(&dev->dev_stat_grps.scsi_lu_group,
                        "scsi_lu", &target_stat_scsi_lu_cit);
 
-       dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
-       dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
-       dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
+       dev_stat_grp->default_groups[0] = &dev->dev_stat_grps.scsi_dev_group;
+       dev_stat_grp->default_groups[1] = &dev->dev_stat_grps.scsi_tgt_dev_group;
+       dev_stat_grp->default_groups[2] = &dev->dev_stat_grps.scsi_lu_group;
        dev_stat_grp->default_groups[3] = NULL;
 }
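
A note on the refactoring pattern running through the hunks above: struct se_dev_stat_grps is now embedded directly in struct se_device, so every show routine recovers its device with a single container_of() and the old "if (!dev) return -ENODEV" checks can go away, since an embedded member cannot outlive its container. A minimal, self-contained userspace sketch of the idiom (the struct and function names below are hypothetical, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct stat_grps { int unused; };

    struct device {
            int dev_index;
            struct stat_grps dev_stat_grps;   /* embedded: shares lifetime */
    };

    static int show_dev(struct stat_grps *sgrps)
    {
            /* member pointer -> enclosing object, no NULL check needed */
            struct device *dev =
                    container_of(sgrps, struct device, dev_stat_grps);
            return printf("%d\n", dev->dev_index);
    }

    int main(void)
    {
            struct device d = { .dev_index = 7 };
            show_dev(&d.dev_stat_grps);
            return 0;
    }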
 
@@ -1161,7 +1001,7 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
                return -ENODEV;
        }
        tpg = sep->sep_tpg;
-       wwn = &dev->se_sub_dev->t10_wwn;
+       wwn = &dev->t10_wwn;
        /* scsiTransportDevName */
        ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
                        tpg->se_tpg_tfo->tpg_get_wwn(tpg),
index be75c43..c6e0293 100644 (file)
@@ -3,8 +3,7 @@
  *
  * This file contains SPC-3 task management infrastructure
  *
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -371,7 +370,7 @@ int core_tmr_lun_reset(
         * which the command was received shall be completed with TASK ABORTED
         * status (see SAM-4).
         */
-       tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+       tas = dev->dev_attrib.emulate_tas;
        /*
         * Determine if this se_tmr is coming from a $FABRIC_MOD
         * or struct se_device passthrough..
@@ -399,10 +398,10 @@ int core_tmr_lun_reset(
         * LOGICAL UNIT RESET
         */
        if (!preempt_and_abort_list &&
-            (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
+            (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)) {
                spin_lock(&dev->dev_reservation_lock);
                dev->dev_reserved_node_acl = NULL;
-               dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
+               dev->dev_reservation_flags &= ~DRF_SPC2_RESERVATIONS;
                spin_unlock(&dev->dev_reservation_lock);
                pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
        }
index a531fe2..5192ac0 100644 (file)
@@ -3,10 +3,7 @@
  *
  * This file contains generic Target Portal Group related functions.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -619,6 +616,29 @@ int core_tpg_set_initiator_node_queue_depth(
 }
 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
 
+/*     core_tpg_set_initiator_node_tag():
+ *
+ *     Initiator nodeacl tags are not used internally, but may be used by
+ *     userspace to emulate aliases or groups.
+ *     Returns length of newly-set tag or -EINVAL.
+ */
+int core_tpg_set_initiator_node_tag(
+       struct se_portal_group *tpg,
+       struct se_node_acl *acl,
+       const char *new_tag)
+{
+       if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
+               return -EINVAL;
+
+       if (!strncmp("NULL", new_tag, 4)) {
+               acl->acl_tag[0] = '\0';
+               return 0;
+       }
+
+       return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
+}
+EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);
+
 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
 {
        /* Set in core_dev_setup_virtual_lun0() */
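
From the caller's side, the contract documented above works out as follows (the call sites here are illustrative only; just the function itself is part of this patch). Tags of MAX_ACL_TAG_SIZE characters or more are rejected, the literal string "NULL" clears the tag, and anything else is copied in with its length returned:

    int rc;

    rc = core_tpg_set_initiator_node_tag(tpg, acl, "backup-hosts");
    if (rc < 0)                     /* -EINVAL: tag too long */
            return rc;

    /* the literal string "NULL" resets the tag to empty */
    core_tpg_set_initiator_node_tag(tpg, acl, "NULL");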
@@ -672,6 +692,7 @@ int core_tpg_register(
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
+               lun->lun_link_magic = SE_LUN_LINK_MAGIC;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
index dcecbfb..c23c76c 100644 (file)
@@ -3,10 +3,7 @@
  *
  * This file contains the Generic Target Engine Core.
  *
- * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2002-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -70,7 +67,6 @@ static void transport_handle_queue_full(struct se_cmd *cmd,
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 static void transport_put_cmd(struct se_cmd *cmd);
-static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
@@ -297,7 +293,7 @@ void transport_register_session(
 }
 EXPORT_SYMBOL(transport_register_session);
 
-void target_release_session(struct kref *kref)
+static void target_release_session(struct kref *kref)
 {
        struct se_session *se_sess = container_of(kref,
                        struct se_session, sess_kref);
@@ -558,7 +554,8 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd);
+       transport_generic_request_failure(cmd,
+                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
 }
 
 /*
@@ -626,7 +623,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
                complete(&cmd->t_transport_stop_comp);
                return;
        } else if (cmd->transport_state & CMD_T_FAILED) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -659,7 +655,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 static void transport_write_pending_qf(struct se_cmd *cmd);
 static void transport_complete_qf(struct se_cmd *cmd);
 
-static void target_qf_do_work(struct work_struct *work)
+void target_qf_do_work(struct work_struct *work)
 {
        struct se_device *dev = container_of(work, struct se_device,
                                        qf_work_queue);
@@ -712,29 +708,15 @@ void transport_dump_dev_state(
        int *bl)
 {
        *bl += sprintf(b + *bl, "Status: ");
-       switch (dev->dev_status) {
-       case TRANSPORT_DEVICE_ACTIVATED:
+       if (dev->export_count)
                *bl += sprintf(b + *bl, "ACTIVATED");
-               break;
-       case TRANSPORT_DEVICE_DEACTIVATED:
+       else
                *bl += sprintf(b + *bl, "DEACTIVATED");
-               break;
-       case TRANSPORT_DEVICE_SHUTDOWN:
-               *bl += sprintf(b + *bl, "SHUTDOWN");
-               break;
-       case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-       case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-               *bl += sprintf(b + *bl, "OFFLINE");
-               break;
-       default:
-               *bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
-               break;
-       }
 
        *bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
-               dev->se_sub_dev->se_dev_attrib.block_size,
-               dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+               dev->dev_attrib.block_size,
+               dev->dev_attrib.hw_max_sectors);
        *bl += sprintf(b + *bl, "        ");
 }
 
@@ -991,186 +973,8 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
-static void core_setup_task_attr_emulation(struct se_device *dev)
-{
-       /*
-        * If this device is from Target_Core_Mod/pSCSI, disable the
-        * SAM Task Attribute emulation.
-        *
-        * This is currently not available in upsream Linux/SCSI Target
-        * mode code, and is assumed to be disabled while using TCM/pSCSI.
-        */
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
-               return;
-       }
-
-       dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-       pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
-               " device\n", dev->transport->name,
-               dev->transport->get_device_rev(dev));
-}
-
-static void scsi_dump_inquiry(struct se_device *dev)
-{
-       struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
-       char buf[17];
-       int i, device_type;
-       /*
-        * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
-        */
-       for (i = 0; i < 8; i++)
-               if (wwn->vendor[i] >= 0x20)
-                       buf[i] = wwn->vendor[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Vendor: %s\n", buf);
-
-       for (i = 0; i < 16; i++)
-               if (wwn->model[i] >= 0x20)
-                       buf[i] = wwn->model[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Model: %s\n", buf);
-
-       for (i = 0; i < 4; i++)
-               if (wwn->revision[i] >= 0x20)
-                       buf[i] = wwn->revision[i];
-               else
-                       buf[i] = ' ';
-       buf[i] = '\0';
-       pr_debug("  Revision: %s\n", buf);
-
-       device_type = dev->transport->get_device_type(dev);
-       pr_debug("  Type:   %s ", scsi_device_type(device_type));
-       pr_debug("                 ANSI SCSI revision: %02x\n",
-                               dev->transport->get_device_rev(dev));
-}
-
-struct se_device *transport_add_device_to_core_hba(
-       struct se_hba *hba,
-       struct se_subsystem_api *transport,
-       struct se_subsystem_dev *se_dev,
-       u32 device_flags,
-       void *transport_dev,
-       struct se_dev_limits *dev_limits,
-       const char *inquiry_prod,
-       const char *inquiry_rev)
-{
-       int force_pt;
-       struct se_device  *dev;
-
-       dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-       if (!dev) {
-               pr_err("Unable to allocate memory for se_dev_t\n");
-               return NULL;
-       }
-
-       dev->dev_flags          = device_flags;
-       dev->dev_status         |= TRANSPORT_DEVICE_DEACTIVATED;
-       dev->dev_ptr            = transport_dev;
-       dev->se_hba             = hba;
-       dev->se_sub_dev         = se_dev;
-       dev->transport          = transport;
-       INIT_LIST_HEAD(&dev->dev_list);
-       INIT_LIST_HEAD(&dev->dev_sep_list);
-       INIT_LIST_HEAD(&dev->dev_tmr_list);
-       INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->state_list);
-       INIT_LIST_HEAD(&dev->qf_cmd_list);
-       spin_lock_init(&dev->execute_task_lock);
-       spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->dev_reservation_lock);
-       spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->se_port_lock);
-       spin_lock_init(&dev->se_tmr_lock);
-       spin_lock_init(&dev->qf_cmd_lock);
-       atomic_set(&dev->dev_ordered_id, 0);
-
-       se_dev_set_default_attribs(dev, dev_limits);
-
-       dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
-       dev->creation_time = get_jiffies_64();
-       spin_lock_init(&dev->stats_lock);
-
-       spin_lock(&hba->device_lock);
-       list_add_tail(&dev->dev_list, &hba->hba_dev_list);
-       hba->dev_count++;
-       spin_unlock(&hba->device_lock);
-       /*
-        * Setup the SAM Task Attribute emulation for struct se_device
-        */
-       core_setup_task_attr_emulation(dev);
-       /*
-        * Force PR and ALUA passthrough emulation with internal object use.
-        */
-       force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
-       /*
-        * Setup the Reservations infrastructure for struct se_device
-        */
-       core_setup_reservations(dev, force_pt);
-       /*
-        * Setup the Asymmetric Logical Unit Assignment for struct se_device
-        */
-       if (core_setup_alua(dev, force_pt) < 0)
-               goto err_dev_list;
-
-       /*
-        * Startup the struct se_device processing thread
-        */
-       dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
-                                     dev->transport->name);
-       if (!dev->tmr_wq) {
-               pr_err("Unable to create tmr workqueue for %s\n",
-                       dev->transport->name);
-               goto err_dev_list;
-       }
-       /*
-        * Setup work_queue for QUEUE_FULL
-        */
-       INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
-       /*
-        * Preload the initial INQUIRY const values if we are doing
-        * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
-        * passthrough because this is being provided by the backend LLD.
-        * This is required so that transport_get_inquiry() copies these
-        * originals once back into DEV_T10_WWN(dev) for the virtual device
-        * setup.
-        */
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
-               if (!inquiry_prod || !inquiry_rev) {
-                       pr_err("All non TCM/pSCSI plugins require"
-                               " INQUIRY consts\n");
-                       goto err_wq;
-               }
-
-               strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
-               strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
-               strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
-       }
-       scsi_dump_inquiry(dev);
-
-       return dev;
-
-err_wq:
-       destroy_workqueue(dev->tmr_wq);
-err_dev_list:
-       spin_lock(&hba->device_lock);
-       list_del(&dev->dev_list);
-       hba->dev_count--;
-       spin_unlock(&hba->device_lock);
-
-       se_release_vpd_for_dev(dev);
-
-       kfree(dev);
-
-       return NULL;
-}
-EXPORT_SYMBOL(transport_add_device_to_core_hba);
-
-int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
+sense_reason_t
+target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
        struct se_device *dev = cmd->se_dev;
 
@@ -1185,18 +989,18 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                if (cmd->data_direction == DMA_TO_DEVICE) {
                        pr_err("Rejecting underflow/overflow"
                                        " WRITE data\n");
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
                /*
                 * Reject READ_* or WRITE_* with overflow/underflow for
                 * type SCF_SCSI_DATA_CDB.
                 */
-               if (dev->se_sub_dev->se_dev_attrib.block_size != 512)  {
+               if (dev->dev_attrib.block_size != 512)  {
                        pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
                                " CDB on non 512-byte sector setup subsystem"
                                " plugin: %s\n", dev->transport->name);
                        /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
-                       goto out_invalid_cdb_field;
+                       return TCM_INVALID_CDB_FIELD;
                }
                /*
                 * For the overflow case keep the existing fabric provided
@@ -1216,10 +1020,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 
        return 0;
 
-out_invalid_cdb_field:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-       return -EINVAL;
 }
 
 /*
@@ -1259,45 +1059,41 @@ void transport_init_se_cmd(
 }
 EXPORT_SYMBOL(transport_init_se_cmd);
 
-static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+static sense_reason_t
+transport_check_alloc_task_attr(struct se_cmd *cmd)
 {
+       struct se_device *dev = cmd->se_dev;
+
        /*
         * Check if SAM Task Attribute emulation is enabled for this
         * struct se_device storage object
         */
-       if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
 
        if (cmd->sam_task_attr == MSG_ACA_TAG) {
                pr_debug("SAM Task Attribute ACA"
                        " emulation is not supported\n");
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
        /*
         * Used to determine when ORDERED commands should go from
         * Dormant to Active status.
         */
-       cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
+       cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
        smp_mb__after_atomic_inc();
        pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
-                       cmd->se_dev->transport->name);
+                       dev->transport->name);
        return 0;
 }
 
-/*     target_setup_cmd_from_cdb():
- *
- *     Called from fabric RX Thread.
- */
-int target_setup_cmd_from_cdb(
-       struct se_cmd *cmd,
-       unsigned char *cdb)
+sense_reason_t
+target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
 {
-       struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
-       u32 pr_reg_type = 0;
-       u8 alua_ascq = 0;
+       struct se_device *dev = cmd->se_dev;
        unsigned long flags;
-       int ret;
+       sense_reason_t ret;
 
        /*
         * Ensure that the received CDB is less than the max (252 + 8) bytes
@@ -1307,9 +1103,7 @@ int target_setup_cmd_from_cdb(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
+               return TCM_INVALID_CDB_FIELD;
        }
        /*
         * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1324,10 +1118,7 @@ int target_setup_cmd_from_cdb(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->scsi_sense_reason =
-                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       return -ENOMEM;
+                       return TCM_OUT_OF_RESOURCES;
                }
        } else
                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
@@ -1339,70 +1130,30 @@ int target_setup_cmd_from_cdb(
        /*
         * Check for an existing UNIT ATTENTION condition
         */
-       if (core_scsi3_ua_check(cmd, cdb) < 0) {
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
-               return -EINVAL;
-       }
+       ret = target_scsi3_ua_check(cmd);
+       if (ret)
+               return ret;
 
-       ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
-       if (ret != 0) {
-               /*
-                * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-                * The ALUA additional sense code qualifier (ASCQ) is determined
-                * by the ALUA primary or secondary access state..
-                */
-               if (ret > 0) {
-                       pr_debug("[%s]: ALUA TG Port not available, "
-                               "SenseKey: NOT_READY, ASC/ASCQ: "
-                               "0x04/0x%02x\n",
-                               cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-                       transport_set_sense_codes(cmd, 0x04, alua_ascq);
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
-                       return -EINVAL;
-               }
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
+       ret = target_alua_state_check(cmd);
+       if (ret)
+               return ret;
 
-       /*
-        * Check status for SPC-3 Persistent Reservations
-        */
-       if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
-               if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0) {
-                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-                       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
-                       return -EBUSY;
-               }
-               /*
-                * This means the CDB is allowed for the SCSI Initiator port
-                * when said port is *NOT* holding the legacy SPC-2 or
-                * SPC-3 Persistent Reservation.
-                */
-       }
+       ret = target_check_reservation(cmd);
+       if (ret)
+               return ret;
 
-       ret = cmd->se_dev->transport->parse_cdb(cmd);
-       if (ret < 0)
+       ret = dev->transport->parse_cdb(cmd);
+       if (ret)
+               return ret;
+
+       ret = transport_check_alloc_task_attr(cmd);
+       if (ret)
                return ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-       /*
-        * Check for SAM Task Attribute Emulation
-        */
-       if (transport_check_alloc_task_attr(cmd) < 0) {
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
        spin_lock(&cmd->se_lun->lun_sep_lock);
        if (cmd->se_lun->lun_sep)
                cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
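
This hunk shows the error-handling conversion at the core of the series: rather than stashing a TCM_* code in cmd->scsi_sense_reason, setting SCF_SCSI_CDB_EXCEPTION, and returning a negative errno, the CDB setup path now returns a sense_reason_t that the caller forwards directly to the failure path. A sketch of the resulting caller pattern, condensed from the target_submit_cmd_map_sgls() hunk further down:

    sense_reason_t rc;

    rc = target_setup_cmd_from_cdb(se_cmd, cdb);
    if (rc) {
            /* rc already names the sense to report, e.g.
             * TCM_INVALID_CDB_FIELD or TCM_OUT_OF_RESOURCES */
            transport_generic_request_failure(se_cmd, rc);
            return 0;
    }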
@@ -1418,7 +1169,7 @@ EXPORT_SYMBOL(target_setup_cmd_from_cdb);
 int transport_handle_cdb_direct(
        struct se_cmd *cmd)
 {
-       int ret;
+       sense_reason_t ret;
 
        if (!cmd->se_lun) {
                dump_stack();
@@ -1448,13 +1199,41 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0)
-               transport_generic_request_failure(cmd);
-
+       if (ret)
+               transport_generic_request_failure(cmd, ret);
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+static sense_reason_t
+transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
+               u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
+{
+       if (!sgl || !sgl_count)
+               return 0;
+
+       /*
+        * Reject SCSI data overflow with map_mem_to_cmd(), because the
+        * incoming scatterlists have already been sized to match the
+        * fabric's original expected data transfer length.
+        */
+       if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+               pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                       " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+       cmd->t_data_sg = sgl;
+       cmd->t_data_nents = sgl_count;
+
+       if (sgl_bidi && sgl_bidi_count) {
+               cmd->t_bidi_data_sg = sgl_bidi;
+               cmd->t_bidi_data_nents = sgl_bidi_count;
+       }
+       cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+       return 0;
+}
+
 /*
  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
  *                      se_cmd + use pre-allocated SGL memory.
@@ -1487,7 +1266,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
 {
        struct se_portal_group *se_tpg;
-       int rc;
+       sense_reason_t rc;
+       int ret;
 
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
@@ -1508,9 +1288,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
         * for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
-       if (rc)
-               return rc;
+       ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       if (ret)
+               return ret;
        /*
         * Signal bidirectional data payloads to target-core
         */
@@ -1519,16 +1299,16 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
-       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
-               transport_send_check_condition_and_sense(se_cmd,
-                               se_cmd->scsi_sense_reason, 0);
+       rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+       if (rc) {
+               transport_send_check_condition_and_sense(se_cmd, rc, 0);
                target_put_sess_cmd(se_sess, se_cmd);
                return 0;
        }
 
        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
        if (rc != 0) {
-               transport_generic_request_failure(se_cmd);
+               transport_generic_request_failure(se_cmd, rc);
                return 0;
        }
        /*
@@ -1563,7 +1343,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
                                sgl_bidi, sgl_bidi_count);
                if (rc != 0) {
-                       transport_generic_request_failure(se_cmd);
+                       transport_generic_request_failure(se_cmd, rc);
                        return 0;
                }
        }
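
With transport_generic_map_mem_to_cmd() made static, a fabric driver that owns its scatterlists reaches it only through target_submit_cmd_map_sgls(), which ends up setting SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC so that transport_generic_new_cmd() skips its own allocation. Roughly, with the argument list abbreviated from this series and best treated as a sketch:

    /* fabric-owned SGLs (sgl/sgl_count, optional BIDI pair) are handed
     * to the core instead of being allocated by it */
    rc = target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense_buf,
                                    unpacked_lun, data_length, task_attr,
                                    data_dir, TARGET_SCF_ACK_KREF,
                                    sgl, sgl_count, NULL, 0);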
@@ -1709,16 +1489,17 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-void transport_generic_request_failure(struct se_cmd *cmd)
+void transport_generic_request_failure(struct se_cmd *cmd,
+               sense_reason_t sense_reason)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state, cmd->scsi_sense_reason);
+               cmd->t_state, sense_reason);
        pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
                (cmd->transport_state & CMD_T_ACTIVE) != 0,
                (cmd->transport_state & CMD_T_STOP) != 0,
@@ -1727,10 +1508,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
        /*
         * For SAM Task Attribute emulation for failed struct se_cmd
         */
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
 
-       switch (cmd->scsi_sense_reason) {
+       switch (sense_reason) {
        case TCM_NON_EXISTENT_LUN:
        case TCM_UNSUPPORTED_SCSI_OPCODE:
        case TCM_INVALID_CDB_FIELD:
@@ -1743,6 +1523,9 @@ void transport_generic_request_failure(struct se_cmd *cmd)
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
        case TCM_CHECK_CONDITION_NOT_READY:
                break;
+       case TCM_OUT_OF_RESOURCES:
+               sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               break;
        case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
@@ -1759,7 +1542,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
                 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
                 */
                if (cmd->se_sess &&
-                   cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+                   cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2)
                        core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
                                cmd->orig_fe_lun, 0x2C,
                                ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
@@ -1770,13 +1553,12 @@ void transport_generic_request_failure(struct se_cmd *cmd)
                goto check_stop;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+                       cmd->t_task_cdb[0], sense_reason);
+               sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
 
-       ret = transport_send_check_condition_and_sense(cmd,
-                       cmd->scsi_sense_reason, 0);
+       ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;
 
@@ -1794,69 +1576,30 @@ EXPORT_SYMBOL(transport_generic_request_failure);
 
 static void __target_execute_cmd(struct se_cmd *cmd)
 {
-       int error = 0;
+       sense_reason_t ret;
 
        spin_lock_irq(&cmd->t_state_lock);
        cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
        spin_unlock_irq(&cmd->t_state_lock);
 
-       if (cmd->execute_cmd)
-               error = cmd->execute_cmd(cmd);
+       if (cmd->execute_cmd) {
+               ret = cmd->execute_cmd(cmd);
+               if (ret) {
+                       spin_lock_irq(&cmd->t_state_lock);
+                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+                       spin_unlock_irq(&cmd->t_state_lock);
 
-       if (error) {
-               spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
-               spin_unlock_irq(&cmd->t_state_lock);
-
-               transport_generic_request_failure(cmd);
+                       transport_generic_request_failure(cmd, ret);
+               }
        }
 }
 
-void target_execute_cmd(struct se_cmd *cmd)
+static bool target_handle_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       /*
-        * If the received CDB has aleady been aborted stop processing it here.
-        */
-       if (transport_check_aborted_status(cmd, 1)) {
-               complete(&cmd->t_transport_stop_comp);
-               return;
-       }
-
-       /*
-        * Determine if IOCTL context caller in requesting the stopping of this
-        * command for LUN shutdown purposes.
-        */
-       spin_lock_irq(&cmd->t_state_lock);
-       if (cmd->transport_state & CMD_T_LUN_STOP) {
-               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
-               cmd->transport_state &= ~CMD_T_ACTIVE;
-               spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->transport_lun_stop_comp);
-               return;
-       }
-       /*
-        * Determine if frontend context caller is requesting the stopping of
-        * this command for frontend exceptions.
-        */
-       if (cmd->transport_state & CMD_T_STOP) {
-               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
-                       __func__, __LINE__,
-                       cmd->se_tfo->get_task_tag(cmd));
-
-               spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->t_transport_stop_comp);
-               return;
-       }
-
-       cmd->t_state = TRANSPORT_PROCESSING;
-       spin_unlock_irq(&cmd->t_state_lock);
-
-       if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
-               goto execute;
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return false;
 
        /*
         * Check for the existence of HEAD_OF_QUEUE, and if true return 1
@@ -1867,7 +1610,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
                         "se_ordered_id: %u\n",
                         cmd->t_task_cdb[0], cmd->se_ordered_id);
-               goto execute;
+               return false;
        case MSG_ORDERED_TAG:
                atomic_inc(&dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
@@ -1881,7 +1624,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                 * exist that need to be completed first.
                 */
                if (!atomic_read(&dev->simple_cmds))
-                       goto execute;
+                       return false;
                break;
        default:
                /*
@@ -1892,23 +1635,63 @@ void target_execute_cmd(struct se_cmd *cmd)
                break;
        }
 
-       if (atomic_read(&dev->dev_ordered_sync) != 0) {
-               spin_lock(&dev->delayed_cmd_lock);
-               list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
-               spin_unlock(&dev->delayed_cmd_lock);
+       if (atomic_read(&dev->dev_ordered_sync) == 0)
+               return false;
 
-               pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
-                       " delayed CMD list, se_ordered_id: %u\n",
-                       cmd->t_task_cdb[0], cmd->sam_task_attr,
-                       cmd->se_ordered_id);
+       spin_lock(&dev->delayed_cmd_lock);
+       list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+       spin_unlock(&dev->delayed_cmd_lock);
+
+       pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+               " delayed CMD list, se_ordered_id: %u\n",
+               cmd->t_task_cdb[0], cmd->sam_task_attr,
+               cmd->se_ordered_id);
+       return true;
+}
+
+void target_execute_cmd(struct se_cmd *cmd)
+{
+       /*
+        * If the received CDB has already been aborted, stop processing it here.
+        */
+       if (transport_check_aborted_status(cmd, 1)) {
+               complete(&cmd->transport_lun_stop_comp);
                return;
        }
 
-execute:
        /*
-        * Otherwise, no ORDERED task attributes exist..
+        * Determine if IOCTL context caller is requesting the stopping of this
+        * command for LUN shutdown purposes.
         */
-       __target_execute_cmd(cmd);
+       spin_lock_irq(&cmd->t_state_lock);
+       if (cmd->transport_state & CMD_T_LUN_STOP) {
+               pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
+
+               cmd->transport_state &= ~CMD_T_ACTIVE;
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->transport_lun_stop_comp);
+               return;
+       }
+       /*
+        * Determine if frontend context caller is requesting the stopping of
+        * this command for frontend exceptions.
+        */
+       if (cmd->transport_state & CMD_T_STOP) {
+               pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+                       __func__, __LINE__,
+                       cmd->se_tfo->get_task_tag(cmd));
+
+               spin_unlock_irq(&cmd->t_state_lock);
+               complete(&cmd->t_transport_stop_comp);
+               return;
+       }
+
+       cmd->t_state = TRANSPORT_PROCESSING;
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       if (!target_handle_task_attr(cmd))
+               __target_execute_cmd(cmd);
 }
 EXPORT_SYMBOL(target_execute_cmd);
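
After this rework target_execute_cmd() reduces to the stop/abort checks plus a single guard: target_handle_task_attr() returns true when the command has to wait on the delayed list. The decision it encodes, condensed from the hunk above:

    /* pSCSI passthrough -> run now (no SAM attribute emulation)
     * MSG_HEAD_TAG       -> run now, bypassing the ordered sync
     * MSG_ORDERED_TAG    -> run now only if no simple cmds in flight
     * simple/untagged    -> run now unless dev_ordered_sync != 0
     * in every other case the cmd is parked on dev->delayed_cmd_list */
    if (!target_handle_task_attr(cmd))
            __target_execute_cmd(cmd);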
 
@@ -1947,6 +1730,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
+       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+               return;
+
        if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                atomic_dec(&dev->simple_cmds);
                smp_mb__after_atomic_dec();
@@ -1975,8 +1761,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
 {
        int ret = 0;
 
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
 
        if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
                ret = cmd->se_tfo->queue_status(cmd);
@@ -2034,8 +1819,8 @@ static void target_complete_ok_work(struct work_struct *work)
         * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
         * Attribute.
         */
-       if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-               transport_complete_task_attr(cmd);
+       transport_complete_task_attr(cmd);
+
        /*
         * Check to schedule QUEUE_FULL work, or execute an existing
         * cmd->transport_qf_callback()
@@ -2183,9 +1968,10 @@ static void transport_put_cmd(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_fe_count)) {
-               if (!atomic_dec_and_test(&cmd->t_fe_count))
-                       goto out_busy;
+       if (atomic_read(&cmd->t_fe_count) &&
+           !atomic_dec_and_test(&cmd->t_fe_count)) {
+               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+               return;
        }
 
        if (cmd->transport_state & CMD_T_DEV_ACTIVE) {
@@ -2197,56 +1983,7 @@ static void transport_put_cmd(struct se_cmd *cmd)
        transport_free_pages(cmd);
        transport_release_cmd(cmd);
        return;
-out_busy:
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
-/*
- * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
- * allocating in the core.
- * @cmd:  Associated se_cmd descriptor
- * @mem:  SGL style memory for TCM WRITE / READ
- * @sg_mem_num: Number of SGL elements
- * @mem_bidi_in: SGL style memory for TCM BIDI READ
- * @sg_mem_bidi_num: Number of BIDI READ SGL elements
- *
- * Return: nonzero return cmd was rejected for -ENOMEM or inproper usage
- * of parameters.
- */
-int transport_generic_map_mem_to_cmd(
-       struct se_cmd *cmd,
-       struct scatterlist *sgl,
-       u32 sgl_count,
-       struct scatterlist *sgl_bidi,
-       u32 sgl_bidi_count)
-{
-       if (!sgl || !sgl_count)
-               return 0;
-
-       /*
-        * Reject SCSI data overflow with map_mem_to_cmd() as incoming
-        * scatterlists already have been set to follow what the fabric
-        * passes for the original expected data transfer length.
-        */
-       if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
-               pr_warn("Rejecting SCSI DATA overflow for fabric using"
-                       " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
-               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               return -EINVAL;
-       }
-
-       cmd->t_data_sg = sgl;
-       cmd->t_data_nents = sgl_count;
-
-       if (sgl_bidi && sgl_bidi_count) {
-               cmd->t_bidi_data_sg = sgl_bidi;
-               cmd->t_bidi_data_nents = sgl_bidi_count;
-       }
-       cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-       return 0;
 }
-EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
 void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
@@ -2268,10 +2005,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
 
        /* >1 page. use vmap */
        pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
-       if (!pages) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       if (!pages)
                return NULL;
-       }
 
        /* convert sg[] to pages[] */
        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
@@ -2280,10 +2015,8 @@ void *transport_kmap_data_sg(struct se_cmd *cmd)
 
        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
        kfree(pages);
-       if (!cmd->t_data_vmap) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       if (!cmd->t_data_vmap)
                return NULL;
-       }
 
        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
@@ -2349,7 +2082,8 @@ out:
  * might not have the payload yet, so notify the fabric via a call to
  * ->write_pending instead. Otherwise place it on the execution queue.
  */
-int transport_generic_new_cmd(struct se_cmd *cmd)
+sense_reason_t
+transport_generic_new_cmd(struct se_cmd *cmd)
 {
        int ret = 0;
 
@@ -2362,7 +2096,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       goto out_fail;
+                       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        atomic_inc(&cmd->t_fe_count);
@@ -2388,14 +2122,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        if (ret == -EAGAIN || ret == -ENOMEM)
                goto queue_full;
 
-       if (ret < 0)
-               return ret;
-       return 1;
+       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
+       WARN_ON(ret);
+
+       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
-out_fail:
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       return -EINVAL;
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
        cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
@@ -2839,21 +2570,9 @@ static int transport_get_sense_codes(
        return 0;
 }
 
-static int transport_set_sense_codes(
-       struct se_cmd *cmd,
-       u8 asc,
-       u8 ascq)
-{
-       cmd->scsi_asc = asc;
-       cmd->scsi_ascq = ascq;
-
-       return 0;
-}
-
-int transport_send_check_condition_and_sense(
-       struct se_cmd *cmd,
-       u8 reason,
-       int from_transport)
+int
+transport_send_check_condition_and_sense(struct se_cmd *cmd,
+               sense_reason_t reason, int from_transport)
 {
        unsigned char *buffer = cmd->sense_buffer;
        unsigned long flags;
@@ -3044,23 +2763,19 @@ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
 
 int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 {
-       int ret = 0;
+       if (!(cmd->transport_state & CMD_T_ABORTED))
+               return 0;
 
-       if (cmd->transport_state & CMD_T_ABORTED) {
-               if (!send_status ||
-                    (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
-                       return 1;
+       if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+               return 1;
 
-               pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
-                       " status for CDB: 0x%02x ITT: 0x%08x\n",
-                       cmd->t_task_cdb[0],
-                       cmd->se_tfo->get_task_tag(cmd));
+       pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+                cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-               cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
-               cmd->se_tfo->queue_status(cmd);
-               ret = 1;
-       }
-       return ret;
+       cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+       cmd->se_tfo->queue_status(cmd);
+
+       return 1;
 }
 EXPORT_SYMBOL(transport_check_aborted_status);
 
index 6666a0c..bf0e390 100644 (file)
@@ -3,8 +3,7 @@
  *
  * This file contains logic for SPC-3 Unit Attention emulation
  *
- * Copyright (c) 2009,2010 Rising Tide Systems
- * Copyright (c) 2009,2010 Linux-iSCSI.org
+ * (c) Copyright 2009-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
@@ -38,9 +37,8 @@
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
-int core_scsi3_ua_check(
-       struct se_cmd *cmd,
-       unsigned char *cdb)
+sense_reason_t
+target_scsi3_ua_check(struct se_cmd *cmd)
 {
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
@@ -71,16 +69,14 @@ int core_scsi3_ua_check(
         *    was received, then the device server shall process the command
         *    and either:
         */
-       switch (cdb[0]) {
+       switch (cmd->t_task_cdb[0]) {
        case INQUIRY:
        case REPORT_LUNS:
        case REQUEST_SENSE:
                return 0;
        default:
-               return -EINVAL;
+               return TCM_CHECK_CONDITION_UNIT_ATTENTION;
        }
-
-       return -EINVAL;
 }
 
 int core_scsi3_ua_allocate(
@@ -237,7 +233,7 @@ void core_scsi3_ua_for_check_condition(
                 * highest priority UNIT_ATTENTION and ASC/ASCQ without
                 * clearing it.
                 */
-               if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
+               if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) {
                        *asc = ua->ua_asc;
                        *ascq = ua->ua_ascq;
                        break;
@@ -265,8 +261,8 @@ void core_scsi3_ua_for_check_condition(
                " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
                " reported ASC: 0x%02x, ASCQ: 0x%02x\n",
                nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
-               (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
-               "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+               (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+               "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl,
                cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
 }
 
index 6e6b034..0204952 100644 (file)
@@ -26,7 +26,7 @@
 
 extern struct kmem_cache *se_ua_cache;
 
-extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
+extern sense_reason_t target_scsi3_ua_check(struct se_cmd *);
 extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
 extern void core_scsi3_ua_release_all(struct se_dev_entry *);
 extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
index 9585010..12d6fa2 100644 (file)
@@ -430,7 +430,6 @@ static void ft_sess_rcu_free(struct rcu_head *rcu)
 {
        struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
 
-       transport_deregister_session(sess->se_sess);
        kfree(sess);
 }
 
@@ -438,6 +437,7 @@ static void ft_sess_free(struct kref *kref)
 {
        struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
 
+       transport_deregister_session(sess->se_sess);
        call_rcu(&sess->rcu, ft_sess_rcu_free);
 }
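
Note: the two tcm_fc hunks above reorder session teardown. transport_deregister_session() moves out of the RCU callback and into the kref release function, so deregistration runs as soon as the last reference drops, while only the final kfree() still waits out a grace period via call_rcu(). A userspace analog of that split (deferred_free stands in for the RCU callback; the refcounting is simplified and not thread-safe):

    #include <stdio.h>
    #include <stdlib.h>

    struct sess { int refs; };

    static void deferred_free(struct sess *s)  /* analog of ft_sess_rcu_free */
    {
            free(s);
    }

    static void sess_put(struct sess *s)
    {
            if (--s->refs)
                    return;
            /* Teardown that must not wait for the grace period runs now... */
            printf("deregistering session\n");
            /* ...and only the memory release is deferred. */
            deferred_free(s);
    }

    int main(void)
    {
            struct sess *s = malloc(sizeof(*s));
            s->refs = 1;
            sess_put(s);
            return 0;
    }
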
 
index 7eb73c5..5de6e7f 100644 (file)
@@ -6,6 +6,7 @@ comment "USB Physical Layer drivers"
 
 config OMAP_USB2
        tristate "OMAP USB2 PHY Driver"
+       depends on ARCH_OMAP2PLUS
        select USB_OTG_UTILS
        help
          Enable this to support the transceiver that is part of SOC. This
index d670130..b20df5c 100644 (file)
@@ -538,10 +538,6 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 
        if (tv_cmd->tvc_sgl_count) {
                sg_ptr = tv_cmd->tvc_sgl;
-               /*
-                * For BIDI commands, pass in the extra READ buffer
-                * to transport_generic_map_mem_to_cmd() below..
-                */
 /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
 #if 0
                if (se_cmd->se_cmd_flags & SCF_BIDI) {
index 346d67d..b07b2b0 100644 (file)
@@ -1,6 +1,10 @@
 config OMAP2_VRFB
        bool
 
+if ARCH_OMAP2PLUS
+
 source "drivers/video/omap2/dss/Kconfig"
 source "drivers/video/omap2/omapfb/Kconfig"
 source "drivers/video/omap2/displays/Kconfig"
+
+endif
index c433a74..e8ca63a 100644 (file)
@@ -60,6 +60,7 @@ config W1_MASTER_GPIO
 
 config HDQ_MASTER_OMAP
        tristate "OMAP HDQ driver"
+       depends on ARCH_OMAP
        help
          Say Y here if you want support for the 1-wire or HDQ Interface
          on an OMAP processor.
index 58db6df..af47e75 100644 (file)
@@ -338,9 +338,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
 {
-       phys_addr_t phys = page_to_phys(page) + offset;
+       phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);
-       void *map;
 
        BUG_ON(dir == DMA_NONE);
        /*
@@ -356,10 +355,10 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
-       if (!map)
+       if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
-       dev_addr = xen_virt_to_bus(map);
+       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -389,7 +388,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
-               swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }
 
@@ -434,8 +433,7 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
-               swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
-                                      target);
+               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }
 
@@ -494,11 +492,12 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
-                       void *map = swiotlb_tbl_map_single(hwdev,
-                                                          start_dma_addr,
-                                                          sg_phys(sg),
-                                                          sg->length, dir);
-                       if (!map) {
+                       phys_addr_t map = swiotlb_tbl_map_single(hwdev,
+                                                                start_dma_addr,
+                                                                sg_phys(sg),
+                                                                sg->length,
+                                                                dir);
+                       if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
@@ -506,7 +505,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sgl[0].dma_length = 0;
                                return DMA_ERROR_CODE;
                        }
-                       sg->dma_address = xen_virt_to_bus(map);
+                       sg->dma_address = xen_phys_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
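
Note: throughout this file the bounce-buffer handle changes from a pointer to a phys_addr_t, so failure can no longer be tested with `!map`; callers compare against the SWIOTLB_MAP_ERROR sentinel and translate with xen_phys_to_bus() instead of xen_virt_to_bus(). A self-contained sketch of the integer-sentinel pattern (MAP_ERROR here is a stand-in for the real constant):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;
    /* All-ones is not a valid bounce-buffer address, so it can mean
     * "failed"; this mirrors the role of SWIOTLB_MAP_ERROR above. */
    #define MAP_ERROR ((phys_addr_t)-1)

    static phys_addr_t map_single(int fail)
    {
            return fail ? MAP_ERROR : 0x1000;
    }

    int main(void)
    {
            phys_addr_t map = map_single(0);

            /* "if (!map)" would be wrong here: 0 can be a valid address
             * and the sentinel is nonzero, hence the explicit compare. */
            if (map == MAP_ERROR)
                    return 1;
            printf("mapped at %#llx\n", (unsigned long long)map);
            return 0;
    }
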
index f95ae3a..eaff24a 100644 (file)
@@ -28,8 +28,8 @@ config FS_MBCACHE
        tristate
        default y if EXT2_FS=y && EXT2_FS_XATTR
        default y if EXT3_FS=y && EXT3_FS_XATTR
-       default y if EXT4_FS=y && EXT4_FS_XATTR
-       default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
+       default y if EXT4_FS=y
+       default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS
 
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
index 75c1ee6..5cbd00e 100644 (file)
@@ -346,19 +346,15 @@ init_cifs_idmap(void)
        if (!cred)
                return -ENOMEM;
 
-       keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
-                           (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-                           KEY_USR_VIEW | KEY_USR_READ,
-                           KEY_ALLOC_NOT_IN_QUOTA);
+       keyring = keyring_alloc(".cifs_idmap", 0, 0, cred,
+                               (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                               KEY_USR_VIEW | KEY_USR_READ,
+                               KEY_ALLOC_NOT_IN_QUOTA, NULL);
        if (IS_ERR(keyring)) {
                ret = PTR_ERR(keyring);
                goto failed_put_cred;
        }
 
-       ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
-       if (ret < 0)
-               goto failed_put_key;
-
        ret = register_key_type(&cifs_idmap_key_type);
        if (ret < 0)
                goto failed_put_key;
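
Note: keyring_alloc() returns a keyring that is already instantiated, which is why the follow-up key_instantiate_and_link() call could be deleted along with the switch away from raw key_alloc(). A toy userspace model of collapsing a two-step allocate/instantiate API into one call (names are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    struct keyring { int instantiated; };

    /* Analog of the new one-shot API: the object comes back ready to
     * use, so callers cannot forget or misorder the second step. */
    static struct keyring *keyring_alloc_ready(void)
    {
            struct keyring *k = calloc(1, sizeof(*k));
            if (k)
                    k->instantiated = 1;
            return k;
    }

    int main(void)
    {
            struct keyring *k = keyring_alloc_ready();
            if (!k)
                    return 1;
            printf("instantiated=%d\n", k->instantiated);
            free(k);
            return 0;
    }
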
index 7e87e37..b176d42 100644 (file)
@@ -1071,8 +1071,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
         * mapped. 0 in case of a HOLE.
         */
        if (err > 0) {
-               if (err > 1)
-                       WARN_ON(1);
+               WARN_ON(err > 1);
                err = 0;
        }
        *errp = err;
index 5366393..6e50223 100644 (file)
@@ -1661,9 +1661,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
                return -ENOMEM;
        }
        sb->s_fs_info = sbi;
-       sbi->s_mount_opt = 0;
-       sbi->s_resuid = make_kuid(&init_user_ns, EXT3_DEF_RESUID);
-       sbi->s_resgid = make_kgid(&init_user_ns, EXT3_DEF_RESGID);
        sbi->s_sb_block = sb_block;
 
        blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
index c22f170..0a475c8 100644 (file)
@@ -39,22 +39,8 @@ config EXT4_USE_FOR_EXT23
          compiled kernel size by using one file system driver for
          ext2, ext3, and ext4 file systems.
 
-config EXT4_FS_XATTR
-       bool "Ext4 extended attributes"
-       depends on EXT4_FS
-       default y
-       help
-         Extended attributes are name:value pairs associated with inodes by
-         the kernel or by users (see the attr(5) manual page, or visit
-         <http://acl.bestbits.at/> for details).
-
-         If unsure, say N.
-
-         You need this for POSIX ACL support on ext4.
-
 config EXT4_FS_POSIX_ACL
        bool "Ext4 POSIX Access Control Lists"
-       depends on EXT4_FS_XATTR
        select FS_POSIX_ACL
        help
          POSIX Access Control Lists (ACLs) support permissions for users and
@@ -67,7 +53,6 @@ config EXT4_FS_POSIX_ACL
 
 config EXT4_FS_SECURITY
        bool "Ext4 Security Labels"
-       depends on EXT4_FS_XATTR
        help
          Security labels support alternative access control models
          implemented by security modules like SELinux.  This option
index 56fd8f8..0310fec 100644 (file)
@@ -7,8 +7,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
                ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
                ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
-               mmp.o indirect.o
+               mmp.o indirect.o extents_status.o xattr.o xattr_user.o \
+               xattr_trusted.o inline.o
 
-ext4-$(CONFIG_EXT4_FS_XATTR)           += xattr.o xattr_user.o xattr_trusted.o
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)       += acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)                += xattr_security.o
index d3c5b88..e6e0d98 100644 (file)
@@ -423,8 +423,10 @@ ext4_xattr_set_acl(struct dentry *dentry, const char *name, const void *value,
 
 retry:
        handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle))
-               return PTR_ERR(handle);
+       if (IS_ERR(handle)) {
+               error = PTR_ERR(handle);
+               goto release_and_out;
+       }
        error = ext4_set_acl(handle, inode, type, acl);
        ext4_journal_stop(handle);
        if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
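
Note: the fix above matters because returning PTR_ERR(handle) directly skips the cleanup at the function's release_and_out label, which (judging from the label name and the surrounding ext4 ACL code) drops the ACL reference taken earlier in the function; routing the error through the label keeps a single exit path. A minimal sketch of that goto-cleanup shape, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    static int set_acl(int fail_midway)
    {
            int error = 0;
            char *acl = malloc(16);          /* resource taken up front */

            if (!acl)
                    return -1;
            if (fail_midway) {
                    error = -2;
                    goto release_and_out;    /* a bare return would leak acl */
            }
            printf("acl applied\n");
    release_and_out:
            free(acl);
            return error;
    }

    int main(void)
    {
            return set_acl(0) || set_acl(1) != -2;
    }
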
index 8e07d2a..b8d877f 100644 (file)
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include "ext4.h"
-
-static unsigned char ext4_filetype_table[] = {
-       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
+#include "xattr.h"
 
 static int ext4_dx_readdir(struct file *filp,
                           void *dirent, filldir_t filldir);
 
-static unsigned char get_dtype(struct super_block *sb, int filetype)
-{
-       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
-           (filetype >= EXT4_FT_MAX))
-               return DT_UNKNOWN;
-
-       return (ext4_filetype_table[filetype]);
-}
-
 /**
  * Check if the given dir-inode refers to an htree-indexed directory
  * (or a directory which could potentially get converted to use htree
@@ -68,11 +56,14 @@ static int is_dx_dir(struct inode *inode)
  * Return 0 if the directory entry is OK, and 1 if there is a problem
  *
  * Note: this is the opposite of what ext2 and ext3 historically returned...
+ *
+ * bh passed here can be an inode block or a dir data block, depending
+ * on the inode inline data flag.
  */
 int __ext4_check_dir_entry(const char *function, unsigned int line,
                           struct inode *dir, struct file *filp,
                           struct ext4_dir_entry_2 *de,
-                          struct buffer_head *bh,
+                          struct buffer_head *bh, char *buf, int size,
                           unsigned int offset)
 {
        const char *error_msg = NULL;
@@ -85,9 +76,8 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
                error_msg = "rec_len % 4 != 0";
        else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
                error_msg = "rec_len is too small for name_len";
-       else if (unlikely(((char *) de - bh->b_data) + rlen >
-                         dir->i_sb->s_blocksize))
-               error_msg = "directory entry across blocks";
+       else if (unlikely(((char *) de - buf) + rlen > size))
+               error_msg = "directory entry across range";
        else if (unlikely(le32_to_cpu(de->inode) >
                        le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
                error_msg = "inode out of bounds";
@@ -98,14 +88,14 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
                ext4_error_file(filp, function, line, bh->b_blocknr,
                                "bad entry in directory: %s - offset=%u(%u), "
                                "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % bh->b_size),
+                               error_msg, (unsigned) (offset % size),
                                offset, le32_to_cpu(de->inode),
                                rlen, de->name_len);
        else
                ext4_error_inode(dir, function, line, bh->b_blocknr,
                                "bad entry in directory: %s - offset=%u(%u), "
                                "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % bh->b_size),
+                               error_msg, (unsigned) (offset % size),
                                offset, le32_to_cpu(de->inode),
                                rlen, de->name_len);
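
Note: __ext4_check_dir_entry() now validates entries against an explicit buf/size pair instead of assuming a whole disk block, which is what lets the same checker cover inline-data directories; block-based callers simply pass bh->b_data and bh->b_size, as the ext4_readdir hunk below shows. A standalone sketch of the generalized bounds test, with a simplified entry header:

    #include <stdio.h>

    /* Hypothetical entry header; rec_len says where the next entry starts. */
    struct dirent2 { unsigned short rec_len; unsigned char name_len; };

    /* Same test as "directory entry across range" above: the entry must
     * end inside whatever buffer actually contains it. */
    static int entry_in_range(const struct dirent2 *de, const char *buf, int size)
    {
            return ((const char *)de - buf) + de->rec_len <= size;
    }

    int main(void)
    {
            char block[64] = { 0 };
            struct dirent2 *de = (struct dirent2 *)block;

            de->rec_len = 64;
            printf("%d\n", entry_in_range(de, block, sizeof(block))); /* 1 */
            de->rec_len = 65;
            printf("%d\n", entry_in_range(de, block, sizeof(block))); /* 0 */
            return 0;
    }
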
 
@@ -125,6 +115,14 @@ static int ext4_readdir(struct file *filp,
        int ret = 0;
        int dir_has_error = 0;
 
+       if (ext4_has_inline_data(inode)) {
+               int has_inline_data = 1;
+               ret = ext4_read_inline_dir(filp, dirent, filldir,
+                                          &has_inline_data);
+               if (has_inline_data)
+                       return ret;
+       }
+
        if (is_dx_dir(inode)) {
                err = ext4_dx_readdir(filp, dirent, filldir);
                if (err != ERR_BAD_DX_DIR) {
@@ -221,8 +219,9 @@ revalidate:
                while (!error && filp->f_pos < inode->i_size
                       && offset < sb->s_blocksize) {
                        de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-                       if (ext4_check_dir_entry(inode, filp, de,
-                                                bh, offset)) {
+                       if (ext4_check_dir_entry(inode, filp, de, bh,
+                                                bh->b_data, bh->b_size,
+                                                offset)) {
                                /*
                                 * On error, skip the f_pos to the next block
                                 */
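
Note: the inline-data branch added to ext4_readdir() uses a fall-through flag: has_inline_data starts at 1 and, by the convention this patch series appears to use, ext4_read_inline_dir() clears it when the inode turns out not to hold inline data once inspected under the proper lock, letting the regular block-based path below run. A small model of that dispatch, with hypothetical helper names:

    #include <stdio.h>

    static int read_inline_dir(int inode_is_inline, int *has_inline)
    {
            if (!inode_is_inline) {
                    *has_inline = 0;   /* tell the caller to fall through */
                    return 0;
            }
            printf("served from inline area\n");
            return 0;
    }

    static int readdir_dispatch(int inode_is_inline)
    {
            int has_inline = 1;
            int ret = read_inline_dir(inode_is_inline, &has_inline);

            if (has_inline)
                    return ret;        /* inline path handled everything */
            printf("falling back to block-based readdir\n");
            return 0;
    }

    int main(void)
    {
            readdir_dispatch(1);
            readdir_dispatch(0);
            return 0;
    }
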
index df163da..8462eb3 100644 (file)
 #define ext4_debug(fmt, ...)   no_printk(fmt, ##__VA_ARGS__)
 #endif
 
+/*
+ * Turn on EXT_DEBUG to get lots of info about extents operations.
+ */
+#define EXT_DEBUG__
+#ifdef EXT_DEBUG
+#define ext_debug(fmt, ...)    printk(fmt, ##__VA_ARGS__)
+#else
+#define ext_debug(fmt, ...)    no_printk(fmt, ##__VA_ARGS__)
+#endif
+
 #define EXT4_ERROR_INODE(inode, fmt, a...) \
        ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)
 
@@ -392,6 +402,7 @@ struct flex_groups {
 #define EXT4_EXTENTS_FL                        0x00080000 /* Inode uses extents */
 #define EXT4_EA_INODE_FL               0x00200000 /* Inode used for large EA */
 #define EXT4_EOFBLOCKS_FL              0x00400000 /* Blocks allocated beyond EOF */
+#define EXT4_INLINE_DATA_FL            0x10000000 /* Inode has inline data. */
 #define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
 
 #define EXT4_FL_USER_VISIBLE           0x004BDFFF /* User visible flags */
@@ -448,28 +459,26 @@ enum {
        EXT4_INODE_EXTENTS      = 19,   /* Inode uses extents */
        EXT4_INODE_EA_INODE     = 21,   /* Inode used for large EA */
        EXT4_INODE_EOFBLOCKS    = 22,   /* Blocks allocated beyond EOF */
+       EXT4_INODE_INLINE_DATA  = 28,   /* Data in inode. */
        EXT4_INODE_RESERVED     = 31,   /* reserved for ext4 lib */
 };
 
-#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
-#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
-       printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
-               EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
-
-/*
- * Since it's pretty easy to mix up bit numbers and hex values, and we
- * can't do a compile-time test for ENUM values, we use a run-time
- * test to make sure that EXT4_XXX_FL is consistent with respect to
- * EXT4_INODE_XXX.  If all is well the printk and BUG_ON will all drop
- * out so it won't cost any extra space in the compiled kernel image.
- * But it's important that these values are the same, since we are
- * using EXT4_INODE_XXX to test for the flag values, but EXT4_XX_FL
- * must be consistent with the values of FS_XXX_FL defined in
- * include/linux/fs.h and the on-disk values found in ext2, ext3, and
- * ext4 filesystems, and of course the values defined in e2fsprogs.
+/*
+ * Since it's pretty easy to mix up bit numbers and hex values, we use a
+ * build-time check to make sure that EXT4_XXX_FL is consistent with respect to
+ * EXT4_INODE_XXX. If all is well, the macros compile away, so they won't cost
+ * any extra space in the kernel image; otherwise, the build will fail.
+ * It's important that these values are the same, since we are using
+ * EXT4_INODE_XXX to test for flag values, but EXT4_XXX_FL must be consistent
+ * with the values of FS_XXX_FL defined in include/linux/fs.h and the on-disk
+ * values found in ext2, ext3 and ext4 filesystems, and of course the values
+ * defined in e2fsprogs.
  *
  * It's not paranoia if the Murphy's Law really *is* out to get you.  :-)
  */
+#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
+#define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
+
 static inline void ext4_check_flag_values(void)
 {
        CHECK_FLAG_VALUE(SECRM);
@@ -494,6 +503,7 @@ static inline void ext4_check_flag_values(void)
        CHECK_FLAG_VALUE(EXTENTS);
        CHECK_FLAG_VALUE(EA_INODE);
        CHECK_FLAG_VALUE(EOFBLOCKS);
+       CHECK_FLAG_VALUE(INLINE_DATA);
        CHECK_FLAG_VALUE(RESERVED);
 }
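
Note: the runtime printk-plus-BUG_ON check above is replaced by BUILD_BUG_ON(), which turns a true condition into a compile error, so a mismatched flag value can never reach a running kernel and the check costs nothing at runtime. A standalone illustration using a portable stand-in for the kernel macro:

    #include <stdio.h>

    /* Portable stand-in for BUILD_BUG_ON: a negative array size (or
     * C11 _Static_assert) makes the build fail if cond is true. */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    #define SECRM_FL    0x00000001   /* hex flag value */
    #define INODE_SECRM 0            /* corresponding bit number */

    int main(void)
    {
            /* Compiles only if the hex flag equals 1 << bit-number. */
            BUILD_BUG_ON(SECRM_FL != (1 << INODE_SECRM));
            printf("flag values consistent\n");
            return 0;
    }
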
 
@@ -811,6 +821,8 @@ struct ext4_ext_cache {
        __u32           ec_len; /* must be 32bit to return holes */
 };
 
+#include "extents_status.h"
+
 /*
  * fourth extended file system inode data in memory
  */
@@ -833,7 +845,6 @@ struct ext4_inode_info {
 #endif
        unsigned long   i_flags;
 
-#ifdef CONFIG_EXT4_FS_XATTR
        /*
         * Extended attributes can be read independently of the main file
         * data. Taking i_mutex even when reading would cause contention
@@ -842,7 +853,6 @@ struct ext4_inode_info {
         * EAs.
         */
        struct rw_semaphore xattr_sem;
-#endif
 
        struct list_head i_orphan;      /* unlinked but open inodes */
 
@@ -888,6 +898,10 @@ struct ext4_inode_info {
        struct list_head i_prealloc_list;
        spinlock_t i_prealloc_lock;
 
+       /* extents status tree */
+       struct ext4_es_tree i_es_tree;
+       rwlock_t i_es_lock;
+
        /* ialloc */
        ext4_group_t    i_last_alloc_group;
 
@@ -902,6 +916,10 @@ struct ext4_inode_info {
        /* on-disk additional length */
        __u16 i_extra_isize;
 
+       /* Indicate the inline data space. */
+       u16 i_inline_off;
+       u16 i_inline_size;
+
 #ifdef CONFIG_QUOTA
        /* quota space reservation, managed internally by quota code */
        qsize_t i_reserved_quota;
@@ -1360,6 +1378,7 @@ enum {
        EXT4_STATE_DELALLOC_RESERVED,   /* blks already reserved for delalloc */
        EXT4_STATE_DIOREAD_LOCK,        /* Disable support for dio read
                                           nolocking */
+       EXT4_STATE_MAY_INLINE_DATA,     /* may have in-inode data */
 };
 
 #define EXT4_INODE_BIT_FNS(name, field, offset)                                \
@@ -1481,7 +1500,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_DIRDATA          0x1000 /* data in dirent */
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000 /* >2GB or 3-lvl htree */
-#define EXT4_FEATURE_INCOMPAT_INLINEDATA       0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_INLINE_DATA      0x8000 /* data in inode */
 
 #define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
@@ -1505,7 +1524,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
                                         EXT4_FEATURE_INCOMPAT_EXTENTS| \
                                         EXT4_FEATURE_INCOMPAT_64BIT| \
                                         EXT4_FEATURE_INCOMPAT_FLEX_BG| \
-                                        EXT4_FEATURE_INCOMPAT_MMP)
+                                        EXT4_FEATURE_INCOMPAT_MMP |    \
+                                        EXT4_FEATURE_INCOMPAT_INLINE_DATA)
 #define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -1592,6 +1612,11 @@ struct ext4_dir_entry_tail {
        __le32  det_checksum;           /* crc32c(uuid+inum+dirblock) */
 };
 
+#define EXT4_DIRENT_TAIL(block, blocksize) \
+       ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
+                                       ((blocksize) - \
+                                        sizeof(struct ext4_dir_entry_tail))))
+
 /*
  * Ext4 directory file types.  Only the low 3 bits are used.  The
  * other bits are reserved for now.
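
Note: EXT4_DIRENT_TAIL() locates the checksum tail stored in the final sizeof(struct ext4_dir_entry_tail) bytes of a directory block; initialize_dirent_tail(), declared further down in this header, fills it in. A userspace model of the same pointer arithmetic, with a deliberately simplified tail struct:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct ext4_dir_entry_tail. */
    struct dir_entry_tail { uint32_t reserved; uint32_t checksum; };

    /* Same arithmetic as EXT4_DIRENT_TAIL above: the tail occupies the
     * last sizeof(struct dir_entry_tail) bytes of the block. */
    #define DIRENT_TAIL(block, blocksize) \
            ((struct dir_entry_tail *)((char *)(block) + \
                    ((blocksize) - sizeof(struct dir_entry_tail))))

    int main(void)
    {
            size_t blocksize = 4096;
            char *block = calloc(1, blocksize);
            struct dir_entry_tail *t = DIRENT_TAIL(block, blocksize);

            t->checksum = 0x1234;
            printf("tail at offset %zu of %zu\n",
                   (size_t)((char *)t - block), blocksize);
            free(block);
            return 0;
    }
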
@@ -1936,14 +1961,42 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
                                  struct file *,
                                  struct ext4_dir_entry_2 *,
-                                 struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, filp, de, bh, offset)                        \
+                                 struct buffer_head *, char *, int,
+                                 unsigned int);
+#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset)     \
        unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
-                                       (de), (bh), (offset)))
+                                       (de), (bh), (buf), (size), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
                                    __u32 minor_hash,
                                    struct ext4_dir_entry_2 *dirent);
 extern void ext4_htree_free_dir_info(struct dir_private_info *p);
+extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+                            struct buffer_head *bh,
+                            void *buf, int buf_size,
+                            const char *name, int namelen,
+                            struct ext4_dir_entry_2 **dest_de);
+void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+                       const char *name, int namelen);
+static inline void ext4_update_dx_flag(struct inode *inode)
+{
+       if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
+                                    EXT4_FEATURE_COMPAT_DIR_INDEX))
+               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+}
+static unsigned char ext4_filetype_table[] = {
+       DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
+};
+
+static inline unsigned char get_dtype(struct super_block *sb, int filetype)
+{
+       if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FILETYPE) ||
+           (filetype >= EXT4_FT_MAX))
+               return DT_UNKNOWN;
+
+       return ext4_filetype_table[filetype];
+}
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
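
Note: get_dtype() translates the on-disk ext4 file-type byte into the DT_* values readdir reports, defaulting to DT_UNKNOWN when the filetype feature is off or the value is out of range; moving it and its table into the header lets the new inline-data code share them with dir.c. A self-contained version of the lookup (the table values are the standard DT_* numbers, and FT_MAX stands in for EXT4_FT_MAX):

    #include <stdio.h>

    enum { DT_UNKNOWN = 0, DT_FIFO = 1, DT_CHR = 2, DT_DIR = 4,
           DT_BLK = 6, DT_REG = 8, DT_LNK = 10, DT_SOCK = 12 };
    #define FT_MAX 8

    static const unsigned char filetype_table[FT_MAX] = {
            DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
    };

    /* Bounds-checked table lookup mirroring get_dtype() above. */
    static unsigned char get_dtype(int have_filetype_feature, int filetype)
    {
            if (!have_filetype_feature || filetype >= FT_MAX)
                    return DT_UNKNOWN;
            return filetype_table[filetype];
    }

    int main(void)
    {
            printf("%d %d\n", get_dtype(1, 2) == DT_DIR,
                   get_dtype(0, 2) == DT_UNKNOWN);   /* prints "1 1" */
            return 0;
    }
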
@@ -1994,8 +2047,23 @@ struct buffer_head *ext4_getblk(handle_t *, struct inode *,
                                                ext4_lblk_t, int, int *);
 struct buffer_head *ext4_bread(handle_t *, struct inode *,
                                                ext4_lblk_t, int, int *);
+int ext4_get_block_write(struct inode *inode, sector_t iblock,
+                        struct buffer_head *bh_result, int create);
 int ext4_get_block(struct inode *inode, sector_t iblock,
                                struct buffer_head *bh_result, int create);
+int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+                          struct buffer_head *bh, int create);
+int ext4_walk_page_buffers(handle_t *handle,
+                          struct buffer_head *head,
+                          unsigned from,
+                          unsigned to,
+                          int *partial,
+                          int (*fn)(handle_t *handle,
+                                    struct buffer_head *bh));
+int do_journal_get_write_access(handle_t *handle,
+                               struct buffer_head *bh);
+#define FALL_BACK_TO_NONDELALLOC 1
+#define CONVERT_INLINE_DATA     2
 
 extern struct inode *ext4_iget(struct super_block *, unsigned long);
 extern int  ext4_write_inode(struct inode *, struct writeback_control *);
@@ -2050,6 +2118,20 @@ extern int ext4_orphan_add(handle_t *, struct inode *);
 extern int ext4_orphan_del(handle_t *, struct inode *);
 extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
                                __u32 start_minor_hash, __u32 *next_hash);
+extern int search_dir(struct buffer_head *bh,
+                     char *search_buf,
+                     int buf_size,
+                     struct inode *dir,
+                     const struct qstr *d_name,
+                     unsigned int offset,
+                     struct ext4_dir_entry_2 **res_dir);
+extern int ext4_generic_delete_entry(handle_t *handle,
+                                    struct inode *dir,
+                                    struct ext4_dir_entry_2 *de_del,
+                                    struct buffer_head *bh,
+                                    void *entry_buf,
+                                    int buf_size,
+                                    int csum_size);
 
 /* resize.c */
 extern int ext4_group_add(struct super_block *sb,
@@ -2376,6 +2458,15 @@ extern void ext4_unwritten_wait(struct inode *inode);
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;
 extern struct dentry *ext4_get_parent(struct dentry *child);
+extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+                                struct ext4_dir_entry_2 *de,
+                                int blocksize, int csum_size,
+                                unsigned int parent_ino, int dotdot_real_len);
+extern void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+                                  unsigned int blocksize);
+extern int ext4_handle_dirty_dirent_node(handle_t *handle,
+                                        struct inode *inode,
+                                        struct buffer_head *bh);
 
 /* symlink.c */
 extern const struct inode_operations ext4_symlink_inode_operations;
@@ -2393,6 +2484,9 @@ extern int ext4_check_blockref(const char *, unsigned int,
                               struct inode *, __le32 *, unsigned int);
 
 /* extents.c */
+struct ext4_ext_path;
+struct ext4_extent;
+
 extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
 extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
 extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
@@ -2410,8 +2504,27 @@ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
                          ssize_t len);
 extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
                           struct ext4_map_blocks *map, int flags);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode,
+                                        ext4_lblk_t lblocks);
+extern int ext4_extent_tree_init(handle_t *, struct inode *);
+extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
+                                                  int num,
+                                                  struct ext4_ext_path *path);
+extern int ext4_can_extents_be_merged(struct inode *inode,
+                                     struct ext4_extent *ex1,
+                                     struct ext4_extent *ex2);
+extern int ext4_ext_insert_extent(handle_t *, struct inode *,
+                                 struct ext4_ext_path *,
+                                 struct ext4_extent *, int);
+extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
+                                                 struct ext4_ext_path *);
+extern void ext4_ext_drop_refs(struct ext4_ext_path *);
+extern int ext4_ext_check_inode(struct inode *inode);
+extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        __u64 start, __u64 len);
+
+
 /* move_extent.c */
 extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
                             __u64 start_orig, __u64 start_donor,
@@ -2445,14 +2558,10 @@ enum ext4_state_bits {
                                 * never, ever appear in a buffer_head's state
                                 * flag. See EXT4_MAP_FROM_CLUSTER to see where
                                 * this is used. */
-       BH_Da_Mapped,   /* Delayed allocated block that now has a mapping. This
-                        * flag is set when ext4_map_blocks is called on a
-                        * delayed allocated block to get its real mapping. */
 };
 
 BUFFER_FNS(Uninit, uninit)
 TAS_BUFFER_FNS(Uninit, uninit)
-BUFFER_FNS(Da_Mapped, da_mapped)
 
 /*
  * Add new method to test whether block and inode bitmaps are properly
@@ -2503,6 +2612,4 @@ extern void ext4_resize_end(struct super_block *sb);
 
 #endif /* __KERNEL__ */
 
-#include "ext4_extents.h"
-
 #endif /* _EXT4_H */
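
Note: dropping the ext4_extents.h include from this header works because the extents prototypes moved into ext4.h only mention struct ext4_ext_path and struct ext4_extent through pointers, and the two forward declarations added earlier in the hunk are sufficient for that. A compact demonstration of why pointer-only uses need no full definition:

    #include <stdio.h>

    /* A forward declaration is enough for prototypes and pointers... */
    struct ext_path;
    int find_extent(struct ext_path *path);

    /* ...only code that dereferences the type needs the definition. */
    struct ext_path { int depth; };

    int find_extent(struct ext_path *path)
    {
            return path ? path->depth : -1;
    }

    int main(void)
    {
            struct ext_path p = { 3 };
            printf("%d\n", find_extent(&p));
            return 0;
    }
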
index cb1b2c9..487fda1 100644 (file)
 #define CHECK_BINSEARCH__
 
 /*
- * Turn on EXT_DEBUG to get lots of info about extents operations.
- */
-#define EXT_DEBUG__
-#ifdef EXT_DEBUG
-#define ext_debug(fmt, ...)    printk(fmt, ##__VA_ARGS__)
-#else
-#define ext_debug(fmt, ...)    no_printk(fmt, ##__VA_ARGS__)
-#endif
-
-/*
  * If EXT_STATS is defined then stats numbers are collected.
  * These number will be displayed at umount time.
  */
@@ -144,20 +134,6 @@ struct ext4_ext_path {
  */
 
 /*
- * to be called by ext4_ext_walk_space()
- * negative retcode - error
- * positive retcode - signal for ext4_ext_walk_space(), see below
- * callback must return valid extent (passed or newly created)
- */
-typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t,
-                                       struct ext4_ext_cache *,
-                                       struct ext4_extent *, void *);
-
-#define EXT_CONTINUE   0
-#define EXT_BREAK      1
-#define EXT_REPEAT     2
-
-/*
  * Maximum number of logical blocks in a file; ext4_extent's ee_block is
  * __le32.
  */
@@ -300,21 +276,5 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
                                     0xffff);
 }
 
-extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-                                        ext4_lblk_t lblocks);
-extern int ext4_extent_tree_init(handle_t *, struct inode *);
-extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
-                                                  int num,
-                                                  struct ext4_ext_path *path);
-extern int ext4_can_extents_be_merged(struct inode *inode,
-                                     struct ext4_extent *ex1,
-                                     struct ext4_extent *ex2);
-extern int ext4_ext_insert_extent(handle_t *, struct inode *, struct ext4_ext_path *, struct ext4_extent *, int);
-extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
-                                                       struct ext4_ext_path *);
-extern void ext4_ext_drop_refs(struct ext4_ext_path *);
-extern int ext4_ext_check_inode(struct inode *inode);
-extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
-                                     int search_hint_reverse);
 #endif /* _EXT4_EXTENTS */
 
index 56d258c..7177f9b 100644 (file)
@@ -254,13 +254,6 @@ static inline void ext4_handle_sync(handle_t *handle)
                handle->h_sync = 1;
 }
 
-static inline void ext4_handle_release_buffer(handle_t *handle,
-                                               struct buffer_head *bh)
-{
-       if (ext4_handle_valid(handle))
-               jbd2_journal_release_buffer(handle, bh);
-}
-
 static inline int ext4_handle_is_aborted(handle_t *handle)
 {
        if (ext4_handle_valid(handle))
index 7011ac9..26af228 100644 (file)
@@ -41,6 +41,8 @@
 #include <asm/uaccess.h>
 #include <linux/fiemap.h>
 #include "ext4_jbd2.h"
+#include "ext4_extents.h"
+#include "xattr.h"
 
 #include <trace/events/ext4.h>
 
@@ -109,6 +111,9 @@ static int ext4_split_extent_at(handle_t *handle,
                             int split_flag,
                             int flags);
 
+static int ext4_find_delayed_extent(struct inode *inode,
+                                   struct ext4_ext_cache *newex);
+
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
@@ -1959,27 +1964,33 @@ cleanup:
        return err;
 }
 
-static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
-                              ext4_lblk_t num, ext_prepare_callback func,
-                              void *cbdata)
+static int ext4_fill_fiemap_extents(struct inode *inode,
+                                   ext4_lblk_t block, ext4_lblk_t num,
+                                   struct fiemap_extent_info *fieinfo)
 {
        struct ext4_ext_path *path = NULL;
-       struct ext4_ext_cache cbex;
+       struct ext4_ext_cache newex;
        struct ext4_extent *ex;
-       ext4_lblk_t next, start = 0, end = 0;
+       ext4_lblk_t next, next_del, start = 0, end = 0;
        ext4_lblk_t last = block + num;
-       int depth, exists, err = 0;
-
-       BUG_ON(func == NULL);
-       BUG_ON(inode == NULL);
+       int exists, depth = 0, err = 0;
+       unsigned int flags = 0;
+       unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
 
        while (block < last && block != EXT_MAX_BLOCKS) {
                num = last - block;
                /* find extent for this block */
                down_read(&EXT4_I(inode)->i_data_sem);
+
+               if (path && ext_depth(inode) != depth) {
+                       /* depth was changed. we have to realloc path */
+                       kfree(path);
+                       path = NULL;
+               }
+
                path = ext4_ext_find_extent(inode, block, path);
-               up_read(&EXT4_I(inode)->i_data_sem);
                if (IS_ERR(path)) {
+                       up_read(&EXT4_I(inode)->i_data_sem);
                        err = PTR_ERR(path);
                        path = NULL;
                        break;
@@ -1987,13 +1998,16 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 
                depth = ext_depth(inode);
                if (unlikely(path[depth].p_hdr == NULL)) {
+                       up_read(&EXT4_I(inode)->i_data_sem);
                        EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
                        err = -EIO;
                        break;
                }
                ex = path[depth].p_ext;
                next = ext4_ext_next_allocated_block(path);
+               ext4_ext_drop_refs(path);
 
+               flags = 0;
                exists = 0;
                if (!ex) {
                        /* there is no extent yet, so try to allocate
@@ -2030,40 +2044,64 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
                BUG_ON(end <= start);
 
                if (!exists) {
-                       cbex.ec_block = start;
-                       cbex.ec_len = end - start;
-                       cbex.ec_start = 0;
+                       newex.ec_block = start;
+                       newex.ec_len = end - start;
+                       newex.ec_start = 0;
                } else {
-                       cbex.ec_block = le32_to_cpu(ex->ee_block);
-                       cbex.ec_len = ext4_ext_get_actual_len(ex);
-                       cbex.ec_start = ext4_ext_pblock(ex);
+                       newex.ec_block = le32_to_cpu(ex->ee_block);
+                       newex.ec_len = ext4_ext_get_actual_len(ex);
+                       newex.ec_start = ext4_ext_pblock(ex);
+                       if (ext4_ext_is_uninitialized(ex))
+                               flags |= FIEMAP_EXTENT_UNWRITTEN;
                }
 
-               if (unlikely(cbex.ec_len == 0)) {
-                       EXT4_ERROR_INODE(inode, "cbex.ec_len == 0");
-                       err = -EIO;
-                       break;
+               /*
+                * Find delayed extent and update newex accordingly. We call
+                * it even in the !exists case to find out whether newex is the
+                * last existing extent or not.
+                */
+               next_del = ext4_find_delayed_extent(inode, &newex);
+               if (!exists && next_del) {
+                       exists = 1;
+                       flags |= FIEMAP_EXTENT_DELALLOC;
                }
-               err = func(inode, next, &cbex, ex, cbdata);
-               ext4_ext_drop_refs(path);
+               up_read(&EXT4_I(inode)->i_data_sem);
 
-               if (err < 0)
+               if (unlikely(newex.ec_len == 0)) {
+                       EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
+                       err = -EIO;
                        break;
+               }
 
-               if (err == EXT_REPEAT)
-                       continue;
-               else if (err == EXT_BREAK) {
-                       err = 0;
-                       break;
+               /* This is possible iff next == next_del == EXT_MAX_BLOCKS */
+               if (next == next_del) {
+                       flags |= FIEMAP_EXTENT_LAST;
+                       if (unlikely(next_del != EXT_MAX_BLOCKS ||
+                                    next != EXT_MAX_BLOCKS)) {
+                               EXT4_ERROR_INODE(inode,
+                                                "next extent == %u, next "
+                                                "delalloc extent = %u",
+                                                next, next_del);
+                               err = -EIO;
+                               break;
+                       }
                }
 
-               if (ext_depth(inode) != depth) {
-                       /* depth was changed. we have to realloc path */
-                       kfree(path);
-                       path = NULL;
+               if (exists) {
+                       err = fiemap_fill_next_extent(fieinfo,
+                               (__u64)newex.ec_block << blksize_bits,
+                               (__u64)newex.ec_start << blksize_bits,
+                               (__u64)newex.ec_len << blksize_bits,
+                               flags);
+                       if (err < 0)
+                               break;
+                       if (err == 1) {
+                               err = 0;
+                               break;
+                       }
                }
 
-               block = cbex.ec_block + cbex.ec_len;
+               block = newex.ec_block + newex.ec_len;
        }
 
        if (path) {
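
Note: the rewritten walker hands each mapped or delayed region straight to fiemap_fill_next_extent(), whose contract, as the error handling above reflects, is tri-state: negative means failure, 1 means the user's buffer is full (stop, but not an error), 0 means continue. A standalone loop using the same convention (the capacity of 2 is arbitrary):

    #include <stdio.h>

    /* Model of the tri-state contract: <0 error, 1 "full, stop", 0 go on. */
    static int fill_next_extent(int *filled)
    {
            if (*filled >= 2)
                    return 1;
            (*filled)++;
            return 0;
    }

    int main(void)
    {
            int filled = 0, err = 0;

            for (int blk = 0; blk < 10; blk++) {
                    err = fill_next_extent(&filled);
                    if (err < 0)
                            break;          /* real failure */
                    if (err == 1) {
                            err = 0;        /* not an error: caller is full */
                            break;
                    }
            }
            printf("filled=%d err=%d\n", filled, err);
            return 0;
    }
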
@@ -2156,7 +2194,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
                  struct ext4_extent *ex)
 {
        struct ext4_ext_cache *cex;
-       struct ext4_sb_info *sbi;
        int ret = 0;
 
        /*
@@ -2164,7 +2201,6 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
         */
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        cex = &EXT4_I(inode)->i_cached_extent;
-       sbi = EXT4_SB(inode->i_sb);
 
        /* has cache valid data? */
        if (cex->ec_len == 0)
@@ -2273,7 +2309,13 @@ int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
        int index;
-       int depth = ext_depth(inode);
+       int depth;
+
+       /* If we are converting the inline data, only one is needed here. */
+       if (ext4_has_inline_data(inode))
+               return 1;
+
+       depth = ext_depth(inode);
 
        if (chunk)
                index = depth * 2;
@@ -3461,115 +3503,34 @@ out:
 /**
  * ext4_find_delalloc_range: find delayed allocated block in the given range.
  *
- * Goes through the buffer heads in the range [lblk_start, lblk_end] and returns
- * whether there are any buffers marked for delayed allocation. It returns '1'
- * on the first delalloc'ed buffer head found. If no buffer head in the given
- * range is marked for delalloc, it returns 0.
- * lblk_start should always be <= lblk_end.
- * search_hint_reverse is to indicate that searching in reverse from lblk_end to
- * lblk_start might be more efficient (i.e., we will likely hit the delalloc'ed
- * block sooner). This is useful when blocks are truncated sequentially from
- * lblk_start towards lblk_end.
+ * Return 1 if there is a delalloc block in the range, otherwise 0.
  */
 static int ext4_find_delalloc_range(struct inode *inode,
                                    ext4_lblk_t lblk_start,
-                                   ext4_lblk_t lblk_end,
-                                   int search_hint_reverse)
+                                   ext4_lblk_t lblk_end)
 {
-       struct address_space *mapping = inode->i_mapping;
-       struct buffer_head *head, *bh = NULL;
-       struct page *page;
-       ext4_lblk_t i, pg_lblk;
-       pgoff_t index;
-
-       if (!test_opt(inode->i_sb, DELALLOC))
-               return 0;
-
-       /* reverse search wont work if fs block size is less than page size */
-       if (inode->i_blkbits < PAGE_CACHE_SHIFT)
-               search_hint_reverse = 0;
+       struct extent_status es;
 
-       if (search_hint_reverse)
-               i = lblk_end;
+       es.start = lblk_start;
+       ext4_es_find_extent(inode, &es);
+       if (es.len == 0)
+               return 0; /* there is no delayed extent in this tree */
+       else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+               return 1;
+       else if (lblk_start <= es.start && es.start <= lblk_end)
+               return 1;
        else
-               i = lblk_start;
-
-       index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-       while ((i >= lblk_start) && (i <= lblk_end)) {
-               page = find_get_page(mapping, index);
-               if (!page)
-                       goto nextpage;
-
-               if (!page_has_buffers(page))
-                       goto nextpage;
-
-               head = page_buffers(page);
-               if (!head)
-                       goto nextpage;
-
-               bh = head;
-               pg_lblk = index << (PAGE_CACHE_SHIFT -
-                                               inode->i_blkbits);
-               do {
-                       if (unlikely(pg_lblk < lblk_start)) {
-                               /*
-                                * This is possible when fs block size is less
-                                * than page size and our cluster starts/ends in
-                                * middle of the page. So we need to skip the
-                                * initial few blocks till we reach the 'lblk'
-                                */
-                               pg_lblk++;
-                               continue;
-                       }
-
-                       /* Check if the buffer is delayed allocated and that it
-                        * is not yet mapped. (when da-buffers are mapped during
-                        * their writeout, their da_mapped bit is set.)
-                        */
-                       if (buffer_delay(bh) && !buffer_da_mapped(bh)) {
-                               page_cache_release(page);
-                               trace_ext4_find_delalloc_range(inode,
-                                               lblk_start, lblk_end,
-                                               search_hint_reverse,
-                                               1, i);
-                               return 1;
-                       }
-                       if (search_hint_reverse)
-                               i--;
-                       else
-                               i++;
-               } while ((i >= lblk_start) && (i <= lblk_end) &&
-                               ((bh = bh->b_this_page) != head));
-nextpage:
-               if (page)
-                       page_cache_release(page);
-               /*
-                * Move to next page. 'i' will be the first lblk in the next
-                * page.
-                */
-               if (search_hint_reverse)
-                       index--;
-               else
-                       index++;
-               i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       }
-
-       trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
-                                       search_hint_reverse, 0, 0);
-       return 0;
+               return 0;
 }
 
-int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk,
-                              int search_hint_reverse)
+int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        ext4_lblk_t lblk_start, lblk_end;
        lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
        lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
 
-       return ext4_find_delalloc_range(inode, lblk_start, lblk_end,
-                                       search_hint_reverse);
+       return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
 }
 
 /**
@@ -3630,7 +3591,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
                lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
                lblk_to = lblk_from + c_offset - 1;
 
-               if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+               if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
                        allocated_clusters--;
        }
 
@@ -3640,7 +3601,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
                lblk_from = lblk_start + num_blks;
                lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
 
-               if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0))
+               if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
                        allocated_clusters--;
        }
 
@@ -3663,8 +3624,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);
 
-       trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
-                                                   newblock);
+       trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
+                                                   allocated, newblock);
 
        /* get_block() before submit the IO, split the extent */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
@@ -3911,7 +3872,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        struct ext4_extent newex, *ex, *ex2;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        ext4_fsblk_t newblock = 0;
-       int free_on_err = 0, err = 0, depth, ret;
+       int free_on_err = 0, err = 0, depth;
        unsigned int allocated = 0, offset = 0;
        unsigned int allocated_clusters = 0;
        struct ext4_allocation_request ar;
@@ -3927,7 +3888,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
                if (!newex.ee_start_lo && !newex.ee_start_hi) {
                        if ((sbi->s_cluster_ratio > 1) &&
-                           ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+                           ext4_find_delalloc_cluster(inode, map->m_lblk))
                                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
 
                        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -4007,15 +3968,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                                        ee_len, ee_start);
                                goto out;
                        }
-                       ret = ext4_ext_handle_uninitialized_extents(
+                       allocated = ext4_ext_handle_uninitialized_extents(
                                handle, inode, map, path, flags,
                                allocated, newblock);
-                       return ret;
+                       goto out3;
                }
        }
 
        if ((sbi->s_cluster_ratio > 1) &&
-           ext4_find_delalloc_cluster(inode, map->m_lblk, 0))
+           ext4_find_delalloc_cluster(inode, map->m_lblk))
                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
 
        /*
@@ -4284,8 +4245,8 @@ out2:
                kfree(path);
        }
 
-       trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
-               newblock, map->m_len, err ? err : allocated);
+out3:
+       trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
 
        return err ? err : allocated;
 }
@@ -4344,6 +4305,8 @@ void ext4_ext_truncate(struct inode *inode)
 
        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
+       err = ext4_es_remove_extent(inode, last_block,
+                                   EXT_MAX_BLOCKS - last_block);
        err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
 
        /* In a multi-transaction truncate, we only make the final
@@ -4434,6 +4397,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (mode & FALLOC_FL_PUNCH_HOLE)
                return ext4_punch_hole(file, offset, len);
 
+       ret = ext4_convert_inline_data(inode);
+       if (ret)
+               return ret;
+
        trace_ext4_fallocate_enter(inode, offset, len, mode);
        map.m_lblk = offset >> blkbits;
        /*
@@ -4572,206 +4539,43 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 }
 
 /*
- * Callback function called for each extent to gather FIEMAP information.
+ * If newex is not an existing extent (newex->ec_start equals zero),
+ * find the delayed extent at the start of newex, update newex
+ * accordingly, and return the start of the next delayed extent.
+ *
+ * If newex is an existing extent (newex->ec_start is not zero), return
+ * the start of the next delayed extent, or EXT_MAX_BLOCKS if no
+ * delayed extent is found. Leave newex unmodified.
  */
-static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next,
-                      struct ext4_ext_cache *newex, struct ext4_extent *ex,
-                      void *data)
+static int ext4_find_delayed_extent(struct inode *inode,
+                                   struct ext4_ext_cache *newex)
 {
-       __u64   logical;
-       __u64   physical;
-       __u64   length;
-       __u32   flags = 0;
-       int             ret = 0;
-       struct fiemap_extent_info *fieinfo = data;
-       unsigned char blksize_bits;
+       struct extent_status es;
+       ext4_lblk_t next_del;
 
-       blksize_bits = inode->i_sb->s_blocksize_bits;
-       logical = (__u64)newex->ec_block << blksize_bits;
+       es.start = newex->ec_block;
+       next_del = ext4_es_find_extent(inode, &es);
 
        if (newex->ec_start == 0) {
                /*
                 * No extent in extent-tree contains block @newex->ec_start,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
-                *
-                * Holes or delayed-extents are processed as follows.
-                * 1. lookup dirty pages with specified range in pagecache.
-                *    If no page is got, then there is no delayed-extent and
-                *    return with EXT_CONTINUE.
-                * 2. find the 1st mapped buffer,
-                * 3. check if the mapped buffer is both in the request range
-                *    and a delayed buffer. If not, there is no delayed-extent,
-                *    then return.
-                * 4. a delayed-extent is found, the extent will be collected.
                 */
-               ext4_lblk_t     end = 0;
-               pgoff_t         last_offset;
-               pgoff_t         offset;
-               pgoff_t         index;
-               pgoff_t         start_index = 0;
-               struct page     **pages = NULL;
-               struct buffer_head *bh = NULL;
-               struct buffer_head *head = NULL;
-               unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
-
-               pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
-               if (pages == NULL)
-                       return -ENOMEM;
-
-               offset = logical >> PAGE_SHIFT;
-repeat:
-               last_offset = offset;
-               head = NULL;
-               ret = find_get_pages_tag(inode->i_mapping, &offset,
-                                       PAGECACHE_TAG_DIRTY, nr_pages, pages);
-
-               if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
-                       /* First time, try to find a mapped buffer. */
-                       if (ret == 0) {
-out:
-                               for (index = 0; index < ret; index++)
-                                       page_cache_release(pages[index]);
-                               /* just a hole. */
-                               kfree(pages);
-                               return EXT_CONTINUE;
-                       }
-                       index = 0;
-
-next_page:
-                       /* Try to find the 1st mapped buffer. */
-                       end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
-                                 blksize_bits;
-                       if (!page_has_buffers(pages[index]))
-                               goto out;
-                       head = page_buffers(pages[index]);
-                       if (!head)
-                               goto out;
-
-                       index++;
-                       bh = head;
-                       do {
-                               if (end >= newex->ec_block +
-                                       newex->ec_len)
-                                       /* The buffer is out of
-                                        * the request range.
-                                        */
-                                       goto out;
-
-                               if (buffer_mapped(bh) &&
-                                   end >= newex->ec_block) {
-                                       start_index = index - 1;
-                                       /* get the 1st mapped buffer. */
-                                       goto found_mapped_buffer;
-                               }
-
-                               bh = bh->b_this_page;
-                               end++;
-                       } while (bh != head);
-
-                       /* No mapped buffer in the range found in this page,
-                        * We need to look up next page.
-                        */
-                       if (index >= ret) {
-                               /* There is no page left, but we need to limit
-                                * newex->ec_len.
-                                */
-                               newex->ec_len = end - newex->ec_block;
-                               goto out;
-                       }
-                       goto next_page;
-               } else {
-                       /*Find contiguous delayed buffers. */
-                       if (ret > 0 && pages[0]->index == last_offset)
-                               head = page_buffers(pages[0]);
-                       bh = head;
-                       index = 1;
-                       start_index = 0;
-               }
-
-found_mapped_buffer:
-               if (bh != NULL && buffer_delay(bh)) {
-                       /* 1st or contiguous delayed buffer found. */
-                       if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
-                               /*
-                                * 1st delayed buffer found, record
-                                * the start of extent.
-                                */
-                               flags |= FIEMAP_EXTENT_DELALLOC;
-                               newex->ec_block = end;
-                               logical = (__u64)end << blksize_bits;
-                       }
-                       /* Find contiguous delayed buffers. */
-                       do {
-                               if (!buffer_delay(bh))
-                                       goto found_delayed_extent;
-                               bh = bh->b_this_page;
-                               end++;
-                       } while (bh != head);
-
-                       for (; index < ret; index++) {
-                               if (!page_has_buffers(pages[index])) {
-                                       bh = NULL;
-                                       break;
-                               }
-                               head = page_buffers(pages[index]);
-                               if (!head) {
-                                       bh = NULL;
-                                       break;
-                               }
-
-                               if (pages[index]->index !=
-                                   pages[start_index]->index + index
-                                   - start_index) {
-                                       /* Blocks are not contiguous. */
-                                       bh = NULL;
-                                       break;
-                               }
-                               bh = head;
-                               do {
-                                       if (!buffer_delay(bh))
-                                               /* Delayed-extent ends. */
-                                               goto found_delayed_extent;
-                                       bh = bh->b_this_page;
-                                       end++;
-                               } while (bh != head);
-                       }
-               } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
-                       /* a hole found. */
-                       goto out;
+               if (es.len == 0)
+                       /* A hole found. */
+                       return 0;
 
-found_delayed_extent:
-               newex->ec_len = min(end - newex->ec_block,
-                                               (ext4_lblk_t)EXT_INIT_MAX_LEN);
-               if (ret == nr_pages && bh != NULL &&
-                       newex->ec_len < EXT_INIT_MAX_LEN &&
-                       buffer_delay(bh)) {
-                       /* Have not collected an extent and continue. */
-                       for (index = 0; index < ret; index++)
-                               page_cache_release(pages[index]);
-                       goto repeat;
+               if (es.start > newex->ec_block) {
+                       /* A hole found. */
+                       newex->ec_len = min(es.start - newex->ec_block,
+                                           newex->ec_len);
+                       return 0;
                }
 
-               for (index = 0; index < ret; index++)
-                       page_cache_release(pages[index]);
-               kfree(pages);
+               newex->ec_len = es.start + es.len - newex->ec_block;
        }
 
-       physical = (__u64)newex->ec_start << blksize_bits;
-       length =   (__u64)newex->ec_len << blksize_bits;
-
-       if (ex && ext4_ext_is_uninitialized(ex))
-               flags |= FIEMAP_EXTENT_UNWRITTEN;
-
-       if (next == EXT_MAX_BLOCKS)
-               flags |= FIEMAP_EXTENT_LAST;
-
-       ret = fiemap_fill_next_extent(fieinfo, logical, physical,
-                                       length, flags);
-       if (ret < 0)
-               return ret;
-       if (ret == 1)
-               return EXT_BREAK;
-       return EXT_CONTINUE;
+       return next_del;
 }
 /* fiemap flags we can handle specified here */
 #define EXT4_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
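
Aside: a worked example of the rewritten delalloc lookup above
(illustrative only).  Suppose the extent tree has a gap at logical
block 100 and newex->ec_len was preset to 8 blocks.
ext4_es_find_extent() can answer in three ways:

	es.len == 0   /* no delayed extent at or after block 100: a hole */
	es = [104/2)  /* es.start > ec_block: still a hole, trimmed to
			 min(104 - 100, 8) = 4 blocks */
	es = [98/6)   /* overlap: a delalloc extent reaching
			 98 + 6 - 100 = 4 blocks past ec_block */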
@@ -4971,6 +4775,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
        ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);
 
+       err = ext4_es_remove_extent(inode, first_block,
+                                   stop_block - first_block);
        err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
 
        ext4_ext_invalidate_cache(inode);
@@ -4991,12 +4797,22 @@ out_mutex:
        mutex_unlock(&inode->i_mutex);
        return err;
 }
+
 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
 {
        ext4_lblk_t start_blk;
        int error = 0;
 
+       if (ext4_has_inline_data(inode)) {
+               int has_inline = 1;
+
+               error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
+
+               if (has_inline)
+                       return error;
+       }
+
        /* fallback to generic here if not in extents fmt */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return generic_block_fiemap(inode, fieinfo, start, len,
@@ -5018,11 +4834,11 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
 
                /*
-                * Walk the extent tree gathering extent information.
-                * ext4_ext_fiemap_cb will push extents back to user.
+                * Walk the extent tree gathering extent information
+                * and pushing extents back to the user.
                 */
-               error = ext4_ext_walk_space(inode, start_blk, len_blks,
-                                         ext4_ext_fiemap_cb, fieinfo);
+               error = ext4_fill_fiemap_extents(inode, start_blk,
+                                                len_blks, fieinfo);
        }
 
        return error;
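
Aside: since ext4_fiemap() backs the FIEMAP ioctl, a minimal userspace
consumer makes this change easy to exercise.  The following is an
illustrative sketch against the generic FIEMAP ABI in linux/fiemap.h,
not part of the patch; the file name and the extent budget are
arbitrary:

	/* fiemap-demo.c -- dump a file's extents, flagging delalloc ranges. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>
	#include <linux/fiemap.h>

	int main(int argc, char **argv)
	{
		unsigned int i, nr = 32;	/* arbitrary extent budget */
		struct fiemap *fm;
		int fd;

		if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;
		fm = calloc(1, sizeof(*fm) + nr * sizeof(struct fiemap_extent));
		if (!fm)
			return 1;
		fm->fm_length = ~0ULL;		/* map the whole file */
		fm->fm_extent_count = nr;	/* fm_flags stays 0: without
						 * FIEMAP_FLAG_SYNC, delalloc
						 * extents are not flushed
						 * before the lookup */
		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
			return 1;
		for (i = 0; i < fm->fm_mapped_extents; i++)
			printf("logical %llu len %llu bytes%s\n",
			       (unsigned long long)fm->fm_extents[i].fe_logical,
			       (unsigned long long)fm->fm_extents[i].fe_length,
			       (fm->fm_extents[i].fe_flags &
				FIEMAP_EXTENT_DELALLOC) ? " (delalloc)" : "");
		return 0;
	}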
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
new file mode 100644 (file)
index 0000000..564d981
--- /dev/null
@@ -0,0 +1,500 @@
+/*
+ *  fs/ext4/extents_status.c
+ *
+ * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
+ * Modified by
+ *     Allison Henderson <achender@linux.vnet.ibm.com>
+ *     Hugh Dickins <hughd@google.com>
+ *     Zheng Liu <wenqing.lz@taobao.com>
+ *
+ * Ext4 extents status tree core functions.
+ */
+#include <linux/rbtree.h>
+#include "ext4.h"
+#include "extents_status.h"
+#include "ext4_extents.h"
+
+#include <trace/events/ext4.h>
+
+/*
+ * According to previous discussions at the Ext4 Developer Workshop,
+ * we will introduce a new structure called the io tree to track all
+ * extent status in order to solve some problems that we have met
+ * (e.g. the reservation space warning) and to provide extent-level
+ * locking.  The delayed extent tree is the first step towards this
+ * goal.  It was originally built by Yongqiang Yang.  At that time it
+ * was called the delay extent tree, because its only goal was to
+ * track delayed extents in memory in order to simplify the
+ * implementation of fiemap and bigalloc and to introduce lseek
+ * SEEK_DATA/SEEK_HOLE support.  That is why it is still called the
+ * delay extent tree in the comments below.  But to better reflect
+ * what it does, it has been renamed to the extent status tree.
+ *
+ * Currently the first step has been done.  All delayed extents are
+ * tracked in the tree.  The tree records a delayed extent when a
+ * delayed allocation is issued, and updates it when the extent is
+ * written out or invalidated.  Therefore the implementations of
+ * fiemap and bigalloc are simplified, and SEEK_DATA/SEEK_HOLE are
+ * introduced.
+ *
+ * The following comment describes the implementation of the extent
+ * status tree and future work.
+ */
+
+/*
+ * extents status tree implementation for ext4.
+ *
+ *
+ * ==========================================================================
+ * Extents status encompass delayed extents and extent locks
+ *
+ * 1. Why delayed extent implementation ?
+ *
+ * Without delayed extents, ext4 identifies a delayed extent by looking
+ * up the page cache.  This has several deficiencies: the code is
+ * complicated, buggy, and inefficient.
+ *
+ * FIEMAP, SEEK_HOLE/DATA, bigalloc, punch hole and writeout all need
+ * to know whether a block or a range of blocks belongs to a delayed
+ * extent.
+ *
+ * Let us have a look at how they worked without the delayed extents
+ * implementation.
+ *   --        FIEMAP
+ *     FIEMAP looks up the page cache to distinguish delayed
+ *     allocations from holes.
+ *
+ *   --        SEEK_HOLE/DATA
+ *     SEEK_HOLE/DATA has the same problem as FIEMAP.
+ *
+ *   --        bigalloc
+ *     bigalloc looks up the page cache to figure out whether a block
+ *     is already under delayed allocation, in order to determine
+ *     whether quota reservation is needed for the cluster.
+ *
+ *   -- punch hole
+ *     punch hole looks up the page cache to identify a delayed extent.
+ *
+ *   --        writeout
+ *     Writeout looks up the whole page cache to see whether a buffer
+ *     is mapped.  If there are not very many delayed buffers, this is
+ *     time consuming.
+ *
+ * With the delayed extents implementation, FIEMAP, SEEK_HOLE/DATA,
+ * bigalloc and writeout can figure out whether a block or a range of
+ * blocks is under delayed allocation (i.e. belongs to a delayed
+ * extent) by searching the delayed extent tree.
+ *
+ *
+ * ==========================================================================
+ * 2. ext4 delayed extents implementation
+ *
+ *   --        delayed extent
+ *     A delayed extent is a range of blocks which are logically
+ *     contiguous and under delayed allocation.  Unlike an extent in
+ *     ext4, a delayed extent is an in-memory structure with no
+ *     corresponding on-disk data.  There is no limit on the length of
+ *     a delayed extent, so a delayed extent can contain as many
+ *     blocks as are logically contiguous.
+ *
+ *   --        delayed extent tree
+ *     Every inode has a delayed extent tree, and all blocks under
+ *     delayed allocation are added to the tree as delayed extents.
+ *     Delayed extents in the tree are ordered by logical block no.
+ *
+ *   --        operations on a delayed extent tree
+ *     There are three operations on a delayed extent tree: finding
+ *     the next delayed extent, adding a space (a range of blocks),
+ *     and removing a space.
+ *
+ *   --        race on a delayed extent tree
+ *     The delayed extent tree is protected by inode->i_es_lock.
+ *
+ *
+ * ==========================================================================
+ * 3. performance analysis
+ *   --        overhead
+ *     There is a cached extent for write access, so if writes are not
+ *     very random, adding-space operations run in O(1) time.
+ *
+ *   --        gain
+ *     The code is much simpler, more readable, more maintainable and
+ *     more efficient.
+ *
+ *
+ * ==========================================================================
+ * 4. TODO list
+ *   -- Track all extent status
+ *
+ *   -- Improve get block process
+ *
+ *   -- Extent-level locking
+ */
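+
+/*
+ * A worked example of the operations below (illustrative only, units
+ * are logical blocks): starting from an empty tree, adding the space
+ * [10/5) and then [15/3) leaves a single node [10/8), because
+ * __es_insert_extent() extends the cached extent and the merge
+ * helpers absorb adjacent neighbours.  Removing the space [12/2)
+ * afterwards splits that node back into [10/2) and [14/4).
+ */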
+
+static struct kmem_cache *ext4_es_cachep;
+
+int __init ext4_init_es(void)
+{
+       ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+       if (ext4_es_cachep == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
+void ext4_exit_es(void)
+{
+       if (ext4_es_cachep)
+               kmem_cache_destroy(ext4_es_cachep);
+}
+
+void ext4_es_init_tree(struct ext4_es_tree *tree)
+{
+       tree->root = RB_ROOT;
+       tree->cache_es = NULL;
+}
+
+#ifdef ES_DEBUG__
+static void ext4_es_print_tree(struct inode *inode)
+{
+       struct ext4_es_tree *tree;
+       struct rb_node *node;
+
+       printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
+       tree = &EXT4_I(inode)->i_es_tree;
+       node = rb_first(&tree->root);
+       while (node) {
+               struct extent_status *es;
+               es = rb_entry(node, struct extent_status, rb_node);
+               printk(KERN_DEBUG " [%u/%u)", es->start, es->len);
+               node = rb_next(node);
+       }
+       printk(KERN_DEBUG "\n");
+}
+#else
+#define ext4_es_print_tree(inode)
+#endif
+
+static inline ext4_lblk_t extent_status_end(struct extent_status *es)
+{
+       BUG_ON(es->start + es->len < es->start);
+       return es->start + es->len - 1;
+}
+
+/*
+ * Search the tree for a delayed extent containing a given offset.  If
+ * one can't be found, try to find the next extent.
+ */
+static struct extent_status *__es_tree_search(struct rb_root *root,
+                                             ext4_lblk_t offset)
+{
+       struct rb_node *node = root->rb_node;
+       struct extent_status *es = NULL;
+
+       while (node) {
+               es = rb_entry(node, struct extent_status, rb_node);
+               if (offset < es->start)
+                       node = node->rb_left;
+               else if (offset > extent_status_end(es))
+                       node = node->rb_right;
+               else
+                       return es;
+       }
+
+       if (es && offset < es->start)
+               return es;
+
+       if (es && offset > extent_status_end(es)) {
+               node = rb_next(&es->rb_node);
+               return node ? rb_entry(node, struct extent_status, rb_node) :
+                             NULL;
+       }
+
+       return NULL;
+}
+
+/*
+ * ext4_es_find_extent: find the 1st delayed extent covering @es->start
+ * if it exists; otherwise, the next extent after @es->start.
+ *
+ * @inode: the inode which owns delayed extents
+ * @es: delayed extent that we found
+ *
+ * Returns the first block of the next extent after @es, or
+ * EXT_MAX_BLOCKS if no further delayed extent is found.  The delayed
+ * extent found is returned via @es.
+ */
+ext4_lblk_t ext4_es_find_extent(struct inode *inode, struct extent_status *es)
+{
+       struct ext4_es_tree *tree = NULL;
+       struct extent_status *es1 = NULL;
+       struct rb_node *node;
+       ext4_lblk_t ret = EXT_MAX_BLOCKS;
+
+       trace_ext4_es_find_extent_enter(inode, es->start);
+
+       read_lock(&EXT4_I(inode)->i_es_lock);
+       tree = &EXT4_I(inode)->i_es_tree;
+
+       /* find the delayed extent in the cache first */
+       if (tree->cache_es) {
+               es1 = tree->cache_es;
+               if (in_range(es->start, es1->start, es1->len)) {
+                       es_debug("%u cached by [%u/%u)\n",
+                                es->start, es1->start, es1->len);
+                       goto out;
+               }
+       }
+
+       es->len = 0;
+       es1 = __es_tree_search(&tree->root, es->start);
+
+out:
+       if (es1) {
+               tree->cache_es = es1;
+               es->start = es1->start;
+               es->len = es1->len;
+               node = rb_next(&es1->rb_node);
+               if (node) {
+                       es1 = rb_entry(node, struct extent_status, rb_node);
+                       ret = es1->start;
+               }
+       }
+
+       read_unlock(&EXT4_I(inode)->i_es_lock);
+
+       trace_ext4_es_find_extent_exit(inode, es, ret);
+       return ret;
+}
+
+static struct extent_status *
+ext4_es_alloc_extent(ext4_lblk_t start, ext4_lblk_t len)
+{
+       struct extent_status *es;
+       es = kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC);
+       if (es == NULL)
+               return NULL;
+       es->start = start;
+       es->len = len;
+       return es;
+}
+
+static void ext4_es_free_extent(struct extent_status *es)
+{
+       kmem_cache_free(ext4_es_cachep, es);
+}
+
+static struct extent_status *
+ext4_es_try_to_merge_left(struct ext4_es_tree *tree, struct extent_status *es)
+{
+       struct extent_status *es1;
+       struct rb_node *node;
+
+       node = rb_prev(&es->rb_node);
+       if (!node)
+               return es;
+
+       es1 = rb_entry(node, struct extent_status, rb_node);
+       if (es->start == extent_status_end(es1) + 1) {
+               es1->len += es->len;
+               rb_erase(&es->rb_node, &tree->root);
+               ext4_es_free_extent(es);
+               es = es1;
+       }
+
+       return es;
+}
+
+static struct extent_status *
+ext4_es_try_to_merge_right(struct ext4_es_tree *tree, struct extent_status *es)
+{
+       struct extent_status *es1;
+       struct rb_node *node;
+
+       node = rb_next(&es->rb_node);
+       if (!node)
+               return es;
+
+       es1 = rb_entry(node, struct extent_status, rb_node);
+       if (es1->start == extent_status_end(es) + 1) {
+               es->len += es1->len;
+               rb_erase(node, &tree->root);
+               ext4_es_free_extent(es1);
+       }
+
+       return es;
+}
+
+static int __es_insert_extent(struct ext4_es_tree *tree, ext4_lblk_t offset,
+                             ext4_lblk_t len)
+{
+       struct rb_node **p = &tree->root.rb_node;
+       struct rb_node *parent = NULL;
+       struct extent_status *es;
+       ext4_lblk_t end = offset + len - 1;
+
+       BUG_ON(end < offset);
+       es = tree->cache_es;
+       if (es && offset == (extent_status_end(es) + 1)) {
+               es_debug("cached by [%u/%u)\n", es->start, es->len);
+               es->len += len;
+               es = ext4_es_try_to_merge_right(tree, es);
+               goto out;
+       } else if (es && es->start == end + 1) {
+               es_debug("cached by [%u/%u)\n", es->start, es->len);
+               es->start = offset;
+               es->len += len;
+               es = ext4_es_try_to_merge_left(tree, es);
+               goto out;
+       } else if (es && es->start <= offset &&
+                  end <= extent_status_end(es)) {
+               es_debug("cached by [%u/%u)\n", es->start, es->len);
+               goto out;
+       }
+
+       while (*p) {
+               parent = *p;
+               es = rb_entry(parent, struct extent_status, rb_node);
+
+               if (offset < es->start) {
+                       if (es->start == end + 1) {
+                               es->start = offset;
+                               es->len += len;
+                               es = ext4_es_try_to_merge_left(tree, es);
+                               goto out;
+                       }
+                       p = &(*p)->rb_left;
+               } else if (offset > extent_status_end(es)) {
+                       if (offset == extent_status_end(es) + 1) {
+                               es->len += len;
+                               es = ext4_es_try_to_merge_right(tree, es);
+                               goto out;
+                       }
+                       p = &(*p)->rb_right;
+               } else {
+                       if (extent_status_end(es) <= end)
+                               es->len = offset - es->start + len;
+                       goto out;
+               }
+       }
+
+       es = ext4_es_alloc_extent(offset, len);
+       if (!es)
+               return -ENOMEM;
+       rb_link_node(&es->rb_node, parent, p);
+       rb_insert_color(&es->rb_node, &tree->root);
+
+out:
+       tree->cache_es = es;
+       return 0;
+}
+
+/*
+ * ext4_es_insert_extent() adds a space to a delayed extent tree.
+ * The tree is protected by inode->i_es_lock, which this function
+ * takes itself.
+ *
+ * ext4_es_insert_extent is called by ext4_da_write_begin and
+ * ext4_es_remove_extent.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t offset,
+                         ext4_lblk_t len)
+{
+       struct ext4_es_tree *tree;
+       int err = 0;
+
+       trace_ext4_es_insert_extent(inode, offset, len);
+       es_debug("add [%u/%u) to extent status tree of inode %lu\n",
+                offset, len, inode->i_ino);
+
+       write_lock(&EXT4_I(inode)->i_es_lock);
+       tree = &EXT4_I(inode)->i_es_tree;
+       err = __es_insert_extent(tree, offset, len);
+       write_unlock(&EXT4_I(inode)->i_es_lock);
+
+       ext4_es_print_tree(inode);
+
+       return err;
+}
+
+/*
+ * ext4_es_remove_extent() removes a space from a delayed extent tree.
+ * The tree is protected by inode->i_es_lock, which this function
+ * takes itself.
+ *
+ * Return 0 on success, error code on failure.
+ */
+int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t offset,
+                         ext4_lblk_t len)
+{
+       struct rb_node *node;
+       struct ext4_es_tree *tree;
+       struct extent_status *es;
+       struct extent_status orig_es;
+       ext4_lblk_t len1, len2, end;
+       int err = 0;
+
+       trace_ext4_es_remove_extent(inode, offset, len);
+       es_debug("remove [%u/%u) from extent status tree of inode %lu\n",
+                offset, len, inode->i_ino);
+
+       end = offset + len - 1;
+       BUG_ON(end < offset);
+       write_lock(&EXT4_I(inode)->i_es_lock);
+       tree = &EXT4_I(inode)->i_es_tree;
+       es = __es_tree_search(&tree->root, offset);
+       if (!es)
+               goto out;
+       if (es->start > end)
+               goto out;
+
+       /* Simply invalidate cache_es. */
+       tree->cache_es = NULL;
+
+       orig_es.start = es->start;
+       orig_es.len = es->len;
+       len1 = offset > es->start ? offset - es->start : 0;
+       len2 = extent_status_end(es) > end ?
+              extent_status_end(es) - end : 0;
+       if (len1 > 0)
+               es->len = len1;
+       if (len2 > 0) {
+               if (len1 > 0) {
+                       err = __es_insert_extent(tree, end + 1, len2);
+                       if (err) {
+                               es->start = orig_es.start;
+                               es->len = orig_es.len;
+                               goto out;
+                       }
+               } else {
+                       es->start = end + 1;
+                       es->len = len2;
+               }
+               goto out;
+       }
+
+       if (len1 > 0) {
+               node = rb_next(&es->rb_node);
+               if (node)
+                       es = rb_entry(node, struct extent_status, rb_node);
+               else
+                       es = NULL;
+       }
+
+       while (es && extent_status_end(es) <= end) {
+               node = rb_next(&es->rb_node);
+               rb_erase(&es->rb_node, &tree->root);
+               ext4_es_free_extent(es);
+               if (!node) {
+                       es = NULL;
+                       break;
+               }
+               es = rb_entry(node, struct extent_status, rb_node);
+       }
+
+       if (es && es->start < end + 1) {
+               len1 = extent_status_end(es) - end;
+               es->start = end + 1;
+               es->len = len1;
+       }
+
+out:
+       write_unlock(&EXT4_I(inode)->i_es_lock);
+       ext4_es_print_tree(inode);
+       return err;
+}
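
Aside: taken together, the new API composes into a simple scan.  The
following kernel-context sketch (illustrative only, relying solely on
the contract documented for ext4_es_find_extent() above) walks every
delayed extent of an inode:

	struct extent_status es;
	ext4_lblk_t next = 0;

	do {
		es.start = next;		/* resume after the last hit */
		next = ext4_es_find_extent(inode, &es);
		if (es.len == 0)
			break;			/* nothing at or after start */
		pr_info("delayed extent [%u/%u)\n", es.start, es.len);
	} while (next != EXT_MAX_BLOCKS);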
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
new file mode 100644 (file)
index 0000000..077f82d
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ *  fs/ext4/extents_status.h
+ *
+ * Written by Yongqiang Yang <xiaoqiangnk@gmail.com>
+ * Modified by
+ *     Allison Henderson <achender@linux.vnet.ibm.com>
+ *     Zheng Liu <wenqing.lz@taobao.com>
+ *
+ */
+
+#ifndef _EXT4_EXTENTS_STATUS_H
+#define _EXT4_EXTENTS_STATUS_H
+
+/*
+ * Turn on ES_DEBUG__ to get lots of info about extent status operations.
+ */
+#ifdef ES_DEBUG__
+#define es_debug(fmt, ...)     printk(fmt, ##__VA_ARGS__)
+#else
+#define es_debug(fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+struct extent_status {
+       struct rb_node rb_node;
+       ext4_lblk_t start;      /* first block the extent covers */
+       ext4_lblk_t len;        /* length of the extent in blocks */
+};
+
+struct ext4_es_tree {
+       struct rb_root root;
+       struct extent_status *cache_es; /* recently accessed extent */
+};
+
+extern int __init ext4_init_es(void);
+extern void ext4_exit_es(void);
+extern void ext4_es_init_tree(struct ext4_es_tree *tree);
+
+extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t start,
+                                ext4_lblk_t len);
+extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t start,
+                                ext4_lblk_t len);
+extern ext4_lblk_t ext4_es_find_extent(struct inode *inode,
+                               struct extent_status *es);
+
+#endif /* _EXT4_EXTENTS_STATUS_H */
index bf3966b..b64a60b 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mount.h>
 #include <linux/path.h>
 #include <linux/quotaops.h>
+#include <linux/pagevec.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -286,6 +287,324 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 }
 
 /*
+ * Here we use ext4_map_blocks() to get a block mapping for an
+ * extent-based file rather than ext4_ext_walk_space(), because we can
+ * implement SEEK_DATA/SEEK_HOLE for both block-mapped and
+ * extent-mapped files in the same function.  When the extent status
+ * tree has been fully implemented, it will track all extent status
+ * for a file and we can directly use it to retrieve the offset for
+ * SEEK_DATA/SEEK_HOLE.
+ */
+
+/*
+ * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
+ * up the page cache to check whether there is any data in the range
+ * [startoff, endoff]: if this range contains an unwritten extent, we
+ * treat that extent as data or as a hole according to whether the page
+ * cache has data for it or not.
+ */
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+                                    int origin,
+                                    struct ext4_map_blocks *map,
+                                    loff_t *offset)
+{
+       struct pagevec pvec;
+       unsigned int blkbits;
+       pgoff_t index;
+       pgoff_t end;
+       loff_t endoff;
+       loff_t startoff;
+       loff_t lastoff;
+       int found = 0;
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       startoff = *offset;
+       lastoff = startoff;
+       endoff = (map->m_lblk + map->m_len) << blkbits;
+
+       index = startoff >> PAGE_CACHE_SHIFT;
+       end = endoff >> PAGE_CACHE_SHIFT;
+
+       pagevec_init(&pvec, 0);
+       do {
+               int i, num;
+               unsigned long nr_pages;
+
+               num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+               nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+                                         (pgoff_t)num);
+               if (nr_pages == 0) {
+                       if (origin == SEEK_DATA)
+                               break;
+
+                       BUG_ON(origin != SEEK_HOLE);
+                       /*
+                        * If this is the first pass through the loop, or
+                        * the offset is not beyond the end offset, there
+                        * is a hole at this offset.
+                        */
+                       if (lastoff == startoff || lastoff < endoff)
+                               found = 1;
+                       break;
+               }
+
+               /*
+                * If this is the first pass through the loop and the
+                * offset is smaller than the first page's offset, there
+                * is a hole at this offset.
+                */
+               if (lastoff == startoff && origin == SEEK_HOLE &&
+                   lastoff < page_offset(pvec.pages[0])) {
+                       found = 1;
+                       break;
+               }
+
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       struct buffer_head *bh, *head;
+
+                       /*
+                        * If the current offset is not beyond the end of
+                        * the given range, there is a hole at this offset.
+                        */
+                       if (lastoff < endoff && origin == SEEK_HOLE &&
+                           page->index > end) {
+                               found = 1;
+                               *offset = lastoff;
+                               goto out;
+                       }
+
+                       lock_page(page);
+
+                       if (unlikely(page->mapping != inode->i_mapping)) {
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       if (!page_has_buffers(page)) {
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       lastoff = page_offset(page);
+                       bh = head = page_buffers(page);
+                       do {
+                               if (buffer_uptodate(bh) ||
+                                   buffer_unwritten(bh)) {
+                                       if (origin == SEEK_DATA)
+                                               found = 1;
+                               } else {
+                                       if (origin == SEEK_HOLE)
+                                               found = 1;
+                               }
+                               if (found) {
+                                       *offset = max_t(loff_t,
+                                                       startoff, lastoff);
+                                       unlock_page(page);
+                                       goto out;
+                               }
+                               lastoff += bh->b_size;
+                               bh = bh->b_this_page;
+                       } while (bh != head);
+
+                       lastoff = page_offset(page) + PAGE_SIZE;
+                       unlock_page(page);
+               }
+
+               /*
+                * Fewer pages were returned than requested, so the
+                * remainder of the range must be a hole.
+                */
+               if (nr_pages < num && origin == SEEK_HOLE) {
+                       found = 1;
+                       *offset = lastoff;
+                       break;
+               }
+
+               index = pvec.pages[i - 1]->index + 1;
+               pagevec_release(&pvec);
+       } while (index <= end);
+
+out:
+       pagevec_release(&pvec);
+       return found;
+}
+
+/*
+ * ext4_seek_data() retrieves the offset for SEEK_DATA.
+ */
+static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t dataoff, isize;
+       int blkbits;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
+               mutex_unlock(&inode->i_mutex);
+               return -ENXIO;
+       }
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       dataoff = offset;
+
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       if (last != start)
+                               dataoff = last << blkbits;
+                       break;
+               }
+
+               /*
+                * If there is a delayed extent at this offset, it is
+                * treated as data.
+                */
+               es.start = last;
+               (void)ext4_es_find_extent(inode, &es);
+               if (last >= es.start &&
+                   last < es.start + es.len) {
+                       if (last != start)
+                               dataoff = last << blkbits;
+                       break;
+               }
+
+               /*
+                * If there is an unwritten extent at this offset, it is
+                * treated as data or as a hole according to whether the
+                * page cache has data for it.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+                                                             &map, &dataoff);
+                       if (unwritten)
+                               break;
+               }
+
+               last++;
+               dataoff = last << blkbits;
+       } while (last <= end);
+
+       mutex_unlock(&inode->i_mutex);
+
+       if (dataoff > isize)
+               return -ENXIO;
+
+       if (dataoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+               return -EINVAL;
+       if (dataoff > maxsize)
+               return -EINVAL;
+
+       if (dataoff != file->f_pos) {
+               file->f_pos = dataoff;
+               file->f_version = 0;
+       }
+
+       return dataoff;
+}
+
+/*
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
+ */
+static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct ext4_map_blocks map;
+       struct extent_status es;
+       ext4_lblk_t start, last, end;
+       loff_t holeoff, isize;
+       int blkbits;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+
+       isize = i_size_read(inode);
+       if (offset >= isize) {
+               mutex_unlock(&inode->i_mutex);
+               return -ENXIO;
+       }
+
+       blkbits = inode->i_sb->s_blocksize_bits;
+       start = offset >> blkbits;
+       last = start;
+       end = isize >> blkbits;
+       holeoff = offset;
+
+       do {
+               map.m_lblk = last;
+               map.m_len = end - last + 1;
+               ret = ext4_map_blocks(NULL, inode, &map, 0);
+               if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+                       last += ret;
+                       holeoff = last << blkbits;
+                       continue;
+               }
+
+               /*
+                * If there is a delayed extent at this offset, we skip
+                * over it.
+                */
+               es.start = last;
+               (void)ext4_es_find_extent(inode, &es);
+               if (last >= es.start &&
+                   last < es.start + es.len) {
+                       last = es.start + es.len;
+                       holeoff = last << blkbits;
+                       continue;
+               }
+
+               /*
+                * If there is an unwritten extent at this offset, it is
+                * treated as data or as a hole according to whether the
+                * page cache has data for it.
+                */
+               if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+                       int unwritten;
+                       unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+                                                             &map, &holeoff);
+                       if (!unwritten) {
+                               last += ret;
+                               holeoff = last << blkbits;
+                               continue;
+                       }
+               }
+
+               /* find a hole */
+               break;
+       } while (last <= end);
+
+       mutex_unlock(&inode->i_mutex);
+
+       if (holeoff > isize)
+               holeoff = isize;
+
+       if (holeoff < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
+               return -EINVAL;
+       if (holeoff > maxsize)
+               return -EINVAL;
+
+       if (holeoff != file->f_pos) {
+               file->f_pos = holeoff;
+               file->f_version = 0;
+       }
+
+       return holeoff;
+}
+
+/*
  * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
  * by calling generic_file_llseek_size() with the appropriate maxbytes
  * value for each.
@@ -300,8 +619,19 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
        else
                maxbytes = inode->i_sb->s_maxbytes;
 
-       return generic_file_llseek_size(file, offset, origin,
-                                       maxbytes, i_size_read(inode));
+       switch (origin) {
+       case SEEK_SET:
+       case SEEK_CUR:
+       case SEEK_END:
+               return generic_file_llseek_size(file, offset, origin,
+                                               maxbytes, i_size_read(inode));
+       case SEEK_DATA:
+               return ext4_seek_data(file, offset, maxbytes);
+       case SEEK_HOLE:
+               return ext4_seek_hole(file, offset, maxbytes);
+       }
+
+       return -EINVAL;
 }
 
 const struct file_operations ext4_file_operations = {
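
Aside: the two new llseek cases implement the lseek(2) semantics
userspace sees.  A minimal hole-scanner sketch follows (illustrative,
not part of the patch; it assumes a libc new enough to expose
SEEK_DATA/SEEK_HOLE under _GNU_SOURCE):

	#define _GNU_SOURCE	/* SEEK_DATA / SEEK_HOLE */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		off_t data, hole = 0;
		int fd;

		if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;
		for (;;) {
			data = lseek(fd, hole, SEEK_DATA); /* next data run */
			if (data < 0)			/* ENXIO: no more data */
				break;
			hole = lseek(fd, data, SEEK_HOLE); /* end of that run */
			printf("data [%lld, %lld)\n",
			       (long long)data, (long long)hole);
		}
		close(fd);
		return 0;
	}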
@@ -326,12 +656,10 @@ const struct file_operations ext4_file_operations = {
 const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
-#ifdef CONFIG_EXT4_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
-#endif
        .get_acl        = ext4_get_acl,
        .fiemap         = ext4_fiemap,
 };
index be1d89f..dfbc1fe 100644 (file)
@@ -44,7 +44,6 @@
  */
 static int ext4_sync_parent(struct inode *inode)
 {
-       struct writeback_control wbc;
        struct dentry *dentry = NULL;
        struct inode *next;
        int ret = 0;
@@ -66,10 +65,7 @@ static int ext4_sync_parent(struct inode *inode)
                ret = sync_mapping_buffers(inode->i_mapping);
                if (ret)
                        break;
-               memset(&wbc, 0, sizeof(wbc));
-               wbc.sync_mode = WB_SYNC_ALL;
-               wbc.nr_to_write = 0;         /* only write out the inode */
-               ret = sync_inode(inode, &wbc);
+               ret = sync_inode_metadata(inode, 1);
                if (ret)
                        break;
        }
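
Aside: sync_inode_metadata() is the VFS helper that folds the removed
writeback_control boilerplate into one call; quoted from memory, so
treat as a sketch, its body is essentially the following, which makes
the hunk a straight simplification rather than a behaviour change:

	int sync_inode_metadata(struct inode *inode, int wait)
	{
		struct writeback_control wbc = {
			.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
			.nr_to_write = 0,	/* metadata-only */
		};

		return sync_inode(inode, &wbc);
	}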
index 3a100e7..3f32c80 100644 (file)
@@ -762,7 +762,6 @@ got:
 
                BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
                err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);
-               brelse(block_bitmap_bh);
 
                /* recheck and clear flag under lock if we still need to */
                ext4_lock_group(sb, group);
@@ -775,6 +774,7 @@ got:
                        ext4_group_desc_csum_set(sb, group, gdp);
                }
                ext4_unlock_group(sb, group);
+               brelse(block_bitmap_bh);
 
                if (err)
                        goto fail;
@@ -902,6 +902,10 @@ got:
 
        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
+       ei->i_inline_off = 0;
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+               ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+
        ret = inode;
        dquot_initialize(inode);
        err = dquot_alloc_inode(inode);
index 792e388..20862f9 100644 (file)
@@ -22,6 +22,7 @@
 
 #include "ext4_jbd2.h"
 #include "truncate.h"
+#include "ext4_extents.h"      /* Needed for EXT_MAX_BLOCKS */
 
 #include <trace/events/ext4.h>
 
@@ -755,8 +756,7 @@ cleanup:
                partial--;
        }
 out:
-       trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
-                               map->m_pblk, map->m_len, err);
+       trace_ext4_ind_map_blocks_exit(inode, map, err);
        return err;
 }
 
@@ -1412,6 +1412,7 @@ void ext4_ind_truncate(struct inode *inode)
        down_write(&ei->i_data_sem);
 
        ext4_discard_preallocations(inode);
+       ext4_es_remove_extent(inode, last_block, EXT_MAX_BLOCKS - last_block);
 
        /*
         * The orphan list entry will now protect us from any crash which
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
new file mode 100644 (file)
index 0000000..387c47c
--- /dev/null
@@ -0,0 +1,1884 @@
+/*
+ * Copyright (c) 2012 Taobao.
+ * Written by Tao Ma <boyu.mt@taobao.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ext4_jbd2.h"
+#include "ext4.h"
+#include "xattr.h"
+#include "truncate.h"
+#include <linux/fiemap.h>
+
+#define EXT4_XATTR_SYSTEM_DATA "data"
+#define EXT4_MIN_INLINE_DATA_SIZE      ((sizeof(__le32) * EXT4_N_BLOCKS))
+#define EXT4_INLINE_DOTDOT_SIZE        4
+
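+/*
+ * Note: EXT4_MIN_INLINE_DATA_SIZE above is just the i_block array
+ * reused as data storage: assuming the usual EXT4_N_BLOCKS of 15,
+ * that is sizeof(__le32) * 15 = 60 bytes.  Anything beyond those 60
+ * bytes spills into the value of the system.data xattr.
+ */
+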
+int ext4_get_inline_size(struct inode *inode)
+{
+       if (EXT4_I(inode)->i_inline_off)
+               return EXT4_I(inode)->i_inline_size;
+
+       return 0;
+}
+
+static int get_max_inline_xattr_value_size(struct inode *inode,
+                                          struct ext4_iloc *iloc)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_xattr_entry *entry;
+       struct ext4_inode *raw_inode;
+       int free, min_offs;
+
+       min_offs = EXT4_SB(inode->i_sb)->s_inode_size -
+                       EXT4_GOOD_OLD_INODE_SIZE -
+                       EXT4_I(inode)->i_extra_isize -
+                       sizeof(struct ext4_xattr_ibody_header);
+
+       /*
+        * We need to subtract another sizeof(__u32) since an in-inode xattr
+        * needs an empty 4 bytes to indicate the gap between the xattr entry
+        * and the name/value pair.
+        */
+       if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
+               return EXT4_XATTR_SIZE(min_offs -
+                       EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA)) -
+                       EXT4_XATTR_ROUND - sizeof(__u32));
+
+       raw_inode = ext4_raw_inode(iloc);
+       header = IHDR(inode, raw_inode);
+       entry = IFIRST(header);
+
+       /* Compute min_offs. */
+       for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
+               if (!entry->e_value_block && entry->e_value_size) {
+                       size_t offs = le16_to_cpu(entry->e_value_offs);
+                       if (offs < min_offs)
+                               min_offs = offs;
+               }
+       }
+       free = min_offs -
+               ((void *)entry - (void *)IFIRST(header)) - sizeof(__u32);
+
+       if (EXT4_I(inode)->i_inline_off) {
+               entry = (struct ext4_xattr_entry *)
+                       ((void *)raw_inode + EXT4_I(inode)->i_inline_off);
+
+               free += le32_to_cpu(entry->e_value_size);
+               goto out;
+       }
+
+       free -= EXT4_XATTR_LEN(strlen(EXT4_XATTR_SYSTEM_DATA));
+
+       if (free > EXT4_XATTR_ROUND)
+               free = EXT4_XATTR_SIZE(free - EXT4_XATTR_ROUND);
+       else
+               free = 0;
+
+out:
+       return free;
+}
+
+/*
+ * Get the maximum size we can currently store in an inode.
+ * If we can't find space for an xattr entry, don't use the space
+ * of the extents, since we would have no way to indicate the inline
+ * data.
+ */
+int ext4_get_max_inline_size(struct inode *inode)
+{
+       int error, max_inline_size;
+       struct ext4_iloc iloc;
+
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return 0;
+
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error) {
+               ext4_error_inode(inode, __func__, __LINE__, 0,
+                                "can't get inode location %lu",
+                                inode->i_ino);
+               return 0;
+       }
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       max_inline_size = get_max_inline_xattr_value_size(inode, &iloc);
+       up_read(&EXT4_I(inode)->xattr_sem);
+
+       brelse(iloc.bh);
+
+       if (!max_inline_size)
+               return 0;
+
+       return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
+}
+
+int ext4_has_inline_data(struct inode *inode)
+{
+       return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+              EXT4_I(inode)->i_inline_off;
+}
+
+/*
+ * This function does not take xattr_sem, which is OK because it is
+ * currently only used in a code path coming from ext4_iget, before
+ * the new inode has been unlocked.
+ */
+int ext4_find_inline_data_nolock(struct inode *inode)
+{
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = -ENODATA, },
+       };
+       struct ext4_xattr_info i = {
+               .name_index = EXT4_XATTR_INDEX_SYSTEM,
+               .name = EXT4_XATTR_SYSTEM_DATA,
+       };
+       int error;
+
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return 0;
+
+       error = ext4_get_inode_loc(inode, &is.iloc);
+       if (error)
+               return error;
+
+       error = ext4_xattr_ibody_find(inode, &i, &is);
+       if (error)
+               goto out;
+
+       if (!is.s.not_found) {
+               EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+                                       (void *)ext4_raw_inode(&is.iloc));
+               EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+                               le32_to_cpu(is.s.here->e_value_size);
+               ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+       }
+out:
+       brelse(is.iloc.bh);
+       return error;
+}
+
+static int ext4_read_inline_data(struct inode *inode, void *buffer,
+                                unsigned int len,
+                                struct ext4_iloc *iloc)
+{
+       struct ext4_xattr_entry *entry;
+       struct ext4_xattr_ibody_header *header;
+       int cp_len = 0;
+       struct ext4_inode *raw_inode;
+
+       if (!len)
+               return 0;
+
+       BUG_ON(len > EXT4_I(inode)->i_inline_size);
+
+       cp_len = len < EXT4_MIN_INLINE_DATA_SIZE ?
+                       len : EXT4_MIN_INLINE_DATA_SIZE;
+
+       raw_inode = ext4_raw_inode(iloc);
+       memcpy(buffer, (void *)(raw_inode->i_block), cp_len);
+
+       len -= cp_len;
+       buffer += cp_len;
+
+       if (!len)
+               goto out;
+
+       header = IHDR(inode, raw_inode);
+       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+                                           EXT4_I(inode)->i_inline_off);
+       len = min_t(unsigned int, len,
+                   (unsigned int)le32_to_cpu(entry->e_value_size));
+
+       memcpy(buffer,
+              (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs), len);
+       cp_len += len;
+
+out:
+       return cp_len;
+}
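+
+/*
+ * Layout sketch (illustrative): an inline file's bytes live in two
+ * pieces,
+ *
+ *     raw_inode->i_block[]           bytes 0..59
+ *     system.data xattr value        bytes 60 and up
+ *
+ * ext4_read_inline_data() above stitches the two ranges together and
+ * ext4_write_inline_data() below splits a write across them.
+ */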
+
+/*
+ * Write the buffer to the inline inode.
+ * If 'create' is set, we don't need to do the extra copy in the xattr
+ * value since it is already handled by ext4_xattr_ibody_inline_set.
+ * That saves us one memcpy.
+ */
+void ext4_write_inline_data(struct inode *inode, struct ext4_iloc *iloc,
+                           void *buffer, loff_t pos, unsigned int len)
+{
+       struct ext4_xattr_entry *entry;
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_inode *raw_inode;
+       int cp_len = 0;
+
+       BUG_ON(!EXT4_I(inode)->i_inline_off);
+       BUG_ON(pos + len > EXT4_I(inode)->i_inline_size);
+
+       raw_inode = ext4_raw_inode(iloc);
+       buffer += pos;
+
+       if (pos < EXT4_MIN_INLINE_DATA_SIZE) {
+               cp_len = pos + len > EXT4_MIN_INLINE_DATA_SIZE ?
+                        EXT4_MIN_INLINE_DATA_SIZE - pos : len;
+               memcpy((void *)raw_inode->i_block + pos, buffer, cp_len);
+
+               len -= cp_len;
+               buffer += cp_len;
+               pos += cp_len;
+       }
+
+       if (!len)
+               return;
+
+       pos -= EXT4_MIN_INLINE_DATA_SIZE;
+       header = IHDR(inode, raw_inode);
+       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+                                           EXT4_I(inode)->i_inline_off);
+
+       memcpy((void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs) + pos,
+              buffer, len);
+}
+
+static int ext4_create_inline_data(handle_t *handle,
+                                  struct inode *inode, unsigned len)
+{
+       int error;
+       void *value = NULL;
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = -ENODATA, },
+       };
+       struct ext4_xattr_info i = {
+               .name_index = EXT4_XATTR_INDEX_SYSTEM,
+               .name = EXT4_XATTR_SYSTEM_DATA,
+       };
+
+       error = ext4_get_inode_loc(inode, &is.iloc);
+       if (error)
+               return error;
+
+       error = ext4_journal_get_write_access(handle, is.iloc.bh);
+       if (error)
+               goto out;
+
+       if (len > EXT4_MIN_INLINE_DATA_SIZE) {
+               value = EXT4_ZERO_XATTR_VALUE;
+               len -= EXT4_MIN_INLINE_DATA_SIZE;
+       } else {
+               value = "";
+               len = 0;
+       }
+
+       /* Insert the xattr entry. */
+       i.value = value;
+       i.value_len = len;
+
+       error = ext4_xattr_ibody_find(inode, &i, &is);
+       if (error)
+               goto out;
+
+       BUG_ON(!is.s.not_found);
+
+       error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+       if (error) {
+               if (error == -ENOSPC)
+                       ext4_clear_inode_state(inode,
+                                              EXT4_STATE_MAY_INLINE_DATA);
+               goto out;
+       }
+
+       memset((void *)ext4_raw_inode(&is.iloc)->i_block,
+               0, EXT4_MIN_INLINE_DATA_SIZE);
+
+       EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+                                     (void *)ext4_raw_inode(&is.iloc));
+       EXT4_I(inode)->i_inline_size = len + EXT4_MIN_INLINE_DATA_SIZE;
+       ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
+       ext4_set_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+       get_bh(is.iloc.bh);
+       error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+out:
+       brelse(is.iloc.bh);
+       return error;
+}
+
+static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+                                  unsigned int len)
+{
+       int error;
+       void *value = NULL;
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = -ENODATA, },
+       };
+       struct ext4_xattr_info i = {
+               .name_index = EXT4_XATTR_INDEX_SYSTEM,
+               .name = EXT4_XATTR_SYSTEM_DATA,
+       };
+
+       /* If the old space is ok, write the data directly. */
+       if (len <= EXT4_I(inode)->i_inline_size)
+               return 0;
+
+       error = ext4_get_inode_loc(inode, &is.iloc);
+       if (error)
+               return error;
+
+       error = ext4_xattr_ibody_find(inode, &i, &is);
+       if (error)
+               goto out;
+
+       BUG_ON(is.s.not_found);
+
+       len -= EXT4_MIN_INLINE_DATA_SIZE;
+       value = kzalloc(len, GFP_NOFS);
+       if (!value)
+               goto out;
+
+       error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+                                    value, len);
+       if (error == -ENODATA)
+               goto out;
+
+       error = ext4_journal_get_write_access(handle, is.iloc.bh);
+       if (error)
+               goto out;
+
+       /* Update the xattr entry. */
+       i.value = value;
+       i.value_len = len;
+
+       error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+       if (error)
+               goto out;
+
+       EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+                                     (void *)ext4_raw_inode(&is.iloc));
+       EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+                               le32_to_cpu(is.s.here->e_value_size);
+       ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+       get_bh(is.iloc.bh);
+       error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+out:
+       kfree(value);
+       brelse(is.iloc.bh);
+       return error;
+}
+
+int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+                            unsigned int len)
+{
+       int ret, size;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+
+       if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
+               return -ENOSPC;
+
+       size = ext4_get_max_inline_size(inode);
+       if (size < len)
+               return -ENOSPC;
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+
+       if (ei->i_inline_off)
+               ret = ext4_update_inline_data(handle, inode, len);
+       else
+               ret = ext4_create_inline_data(handle, inode, len);
+
+       up_write(&EXT4_I(inode)->xattr_sem);
+
+       return ret;
+}
+
+static int ext4_destroy_inline_data_nolock(handle_t *handle,
+                                          struct inode *inode)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = 0, },
+       };
+       struct ext4_xattr_info i = {
+               .name_index = EXT4_XATTR_INDEX_SYSTEM,
+               .name = EXT4_XATTR_SYSTEM_DATA,
+               .value = NULL,
+               .value_len = 0,
+       };
+       int error;
+
+       if (!ei->i_inline_off)
+               return 0;
+
+       error = ext4_get_inode_loc(inode, &is.iloc);
+       if (error)
+               return error;
+
+       error = ext4_xattr_ibody_find(inode, &i, &is);
+       if (error)
+               goto out;
+
+       error = ext4_journal_get_write_access(handle, is.iloc.bh);
+       if (error)
+               goto out;
+
+       error = ext4_xattr_ibody_inline_set(handle, inode, &i, &is);
+       if (error)
+               goto out;
+
+       memset((void *)ext4_raw_inode(&is.iloc)->i_block,
+               0, EXT4_MIN_INLINE_DATA_SIZE);
+
+       if (EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
+                                     EXT4_FEATURE_INCOMPAT_EXTENTS)) {
+               if (S_ISDIR(inode->i_mode) ||
+                   S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+                       ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
+                       ext4_ext_tree_init(handle, inode);
+               }
+       }
+       ext4_clear_inode_flag(inode, EXT4_INODE_INLINE_DATA);
+
+       get_bh(is.iloc.bh);
+       error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
+
+       EXT4_I(inode)->i_inline_off = 0;
+       EXT4_I(inode)->i_inline_size = 0;
+       ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+out:
+       brelse(is.iloc.bh);
+       if (error == -ENODATA)
+               error = 0;
+       return error;
+}
+
+static int ext4_read_inline_page(struct inode *inode, struct page *page)
+{
+       void *kaddr;
+       int ret = 0;
+       size_t len;
+       struct ext4_iloc iloc;
+
+       BUG_ON(!PageLocked(page));
+       BUG_ON(!ext4_has_inline_data(inode));
+       BUG_ON(page->index);
+
+       if (!EXT4_I(inode)->i_inline_off) {
+               ext4_warning(inode->i_sb, "inode %lu doesn't have inline data.",
+                            inode->i_ino);
+               goto out;
+       }
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               goto out;
+
+       len = min_t(size_t, ext4_get_inline_size(inode), i_size_read(inode));
+       kaddr = kmap_atomic(page);
+       ret = ext4_read_inline_data(inode, kaddr, len, &iloc);
+       flush_dcache_page(page);
+       kunmap_atomic(kaddr);
+       zero_user_segment(page, len, PAGE_CACHE_SIZE);
+       SetPageUptodate(page);
+       brelse(iloc.bh);
+
+out:
+       return ret;
+}
+
+int ext4_readpage_inline(struct inode *inode, struct page *page)
+{
+       int ret = 0;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               up_read(&EXT4_I(inode)->xattr_sem);
+               return -EAGAIN;
+       }
+
+       /*
+        * Currently inline data can only exist in the first page,
+        * so just mark all the other pages uptodate.
+        */
+       if (!page->index)
+               ret = ext4_read_inline_page(inode, page);
+       else if (!PageUptodate(page)) {
+               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               SetPageUptodate(page);
+       }
+
+       up_read(&EXT4_I(inode)->xattr_sem);
+
+       unlock_page(page);
+       return ret >= 0 ? 0 : ret;
+}
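+
+/*
+ * A user-space sketch of the fill rule above, assuming 4096-byte pages:
+ * page 0 carries the inline bytes followed by zeroes, and every later
+ * page is all zeroes.  memcpy/memset stand in for kmap_atomic and
+ * zero_user_segment.
+ */
+#include <string.h>
+
+#define SKETCH_PAGE_SIZE 4096
+
+static void fill_page(char *page, unsigned long index,
+                      const char *inline_buf, size_t inline_len)
+{
+        if (index == 0) {
+                memcpy(page, inline_buf, inline_len);
+                memset(page + inline_len, 0, SKETCH_PAGE_SIZE - inline_len);
+        } else {
+                memset(page, 0, SKETCH_PAGE_SIZE); /* beyond the inline data */
+        }
+}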
+
+static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
+                                             struct inode *inode,
+                                             unsigned flags)
+{
+       int ret, needed_blocks;
+       handle_t *handle = NULL;
+       int retries = 0, sem_held = 0;
+       struct page *page = NULL;
+       unsigned from, to;
+       struct ext4_iloc iloc;
+
+       if (!ext4_has_inline_data(inode)) {
+               /*
+                * clear the flag so that no new write
+                * will trap here again.
+                */
+               ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+               return 0;
+       }
+
+       needed_blocks = ext4_writepage_trans_blocks(inode);
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               return ret;
+
+retry:
+       handle = ext4_journal_start(inode, needed_blocks);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               goto out;
+       }
+
+       /* We cannot recurse into the filesystem as the transaction is already
+        * started */
+       flags |= AOP_FLAG_NOFS;
+
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       sem_held = 1;
+       /* If someone has already done this for us, just exit. */
+       if (!ext4_has_inline_data(inode)) {
+               ret = 0;
+               goto out;
+       }
+
+       from = 0;
+       to = ext4_get_inline_size(inode);
+       if (!PageUptodate(page)) {
+               ret = ext4_read_inline_page(inode, page);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = ext4_destroy_inline_data_nolock(handle, inode);
+       if (ret)
+               goto out;
+
+       if (ext4_should_dioread_nolock(inode))
+               ret = __block_write_begin(page, from, to, ext4_get_block_write);
+       else
+               ret = __block_write_begin(page, from, to, ext4_get_block);
+
+       if (!ret && ext4_should_journal_data(inode)) {
+               ret = ext4_walk_page_buffers(handle, page_buffers(page),
+                                            from, to, NULL,
+                                            do_journal_get_write_access);
+       }
+
+       if (ret) {
+               unlock_page(page);
+               page_cache_release(page);
+               ext4_orphan_add(handle, inode);
+               up_write(&EXT4_I(inode)->xattr_sem);
+               sem_held = 0;
+               ext4_journal_stop(handle);
+               handle = NULL;
+               ext4_truncate_failed_write(inode);
+               /*
+                * If truncate failed early the inode might
+                * still be on the orphan list; we need to
+                * make sure the inode is removed from the
+                * orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
+
+       block_commit_write(page, from, to);
+out:
+       if (page) {
+               unlock_page(page);
+               page_cache_release(page);
+       }
+       if (sem_held)
+               up_write(&EXT4_I(inode)->xattr_sem);
+       if (handle)
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+       return ret;
+}
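+
+/*
+ * The ENOSPC retry pattern above, distilled into a standalone sketch;
+ * do_convert() and should_retry_alloc() are illustrative stubs, not the
+ * kernel helpers (the real ext4_should_retry_alloc also kicks the
+ * journal to free space).
+ */
+#include <errno.h>
+
+static int attempts;
+static int do_convert(void)
+{
+        return ++attempts < 3 ? -ENOSPC : 0; /* fail twice, then succeed */
+}
+
+static int should_retry_alloc(int *retries)
+{
+        return (*retries)++ < 3;
+}
+
+static int convert_with_retry(void)
+{
+        int retries = 0, ret;
+
+        do {
+                ret = do_convert();
+        } while (ret == -ENOSPC && should_retry_alloc(&retries));
+        return ret;
+}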
+
+/*
+ * Try to write the data in the inode.
+ * If the inode has inline data, check whether the new write can fit in
+ * the inode as well. If not, create the page and the handle, move the
+ * data to the page, mark it uptodate, and let the later code create an
+ * extent for it.
+ */
+int ext4_try_to_write_inline_data(struct address_space *mapping,
+                                 struct inode *inode,
+                                 loff_t pos, unsigned len,
+                                 unsigned flags,
+                                 struct page **pagep)
+{
+       int ret;
+       handle_t *handle;
+       struct page *page;
+       struct ext4_iloc iloc;
+
+       if (pos + len > ext4_get_max_inline_size(inode))
+               goto convert;
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               return ret;
+
+       /*
+        * The write may fit in the inode,
+        * so try to reserve space in the inode first.
+        */
+       handle = ext4_journal_start(inode, 1);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               goto out;
+       }
+
+       ret = ext4_prepare_inline_data(handle, inode, pos + len);
+       if (ret && ret != -ENOSPC)
+               goto out;
+
+       /* There is no space left for inline data, so convert it to an extent. */
+       if (ret == -ENOSPC) {
+               ext4_journal_stop(handle);
+               brelse(iloc.bh);
+               goto convert;
+       }
+
+       flags |= AOP_FLAG_NOFS;
+
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       *pagep = page;
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               ret = 0;
+               unlock_page(page);
+               page_cache_release(page);
+               goto out_up_read;
+       }
+
+       if (!PageUptodate(page)) {
+               ret = ext4_read_inline_page(inode, page);
+               if (ret < 0)
+                       goto out_up_read;
+       }
+
+       ret = 1;
+       handle = NULL;
+out_up_read:
+       up_read(&EXT4_I(inode)->xattr_sem);
+out:
+       if (handle)
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+       return ret;
+convert:
+       return ext4_convert_inline_data_to_extent(mapping,
+                                                 inode, flags);
+}
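+
+/*
+ * How a write_begin-style caller is expected to consume the tristate
+ * return value above: <0 is a hard error, 1 means the inline path took
+ * the write (the locked page is in *pagep), 0 means fall back to the
+ * block path.  The helpers below are stubs for illustration only.
+ */
+static int try_inline_write(void) { return 1; }
+static int block_write_begin_path(void) { return 0; }
+
+static int write_begin_sketch(void)
+{
+        int ret = try_inline_write();
+
+        if (ret < 0)
+                return ret;                     /* hard error */
+        if (ret == 1)
+                return 0;                       /* inline path took it */
+        return block_write_begin_path();        /* ret == 0 */
+}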
+
+int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
+                              unsigned copied, struct page *page)
+{
+       int ret;
+       void *kaddr;
+       struct ext4_iloc iloc;
+
+       if (unlikely(copied < len)) {
+               if (!PageUptodate(page)) {
+                       copied = 0;
+                       goto out;
+               }
+       }
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret) {
+               ext4_std_error(inode->i_sb, ret);
+               copied = 0;
+               goto out;
+       }
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       BUG_ON(!ext4_has_inline_data(inode));
+
+       kaddr = kmap_atomic(page);
+       ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
+       kunmap_atomic(kaddr);
+       SetPageUptodate(page);
+       /* Clear the page dirty bit so that writepages won't touch it. */
+       ClearPageDirty(page);
+
+       up_write(&EXT4_I(inode)->xattr_sem);
+       brelse(iloc.bh);
+out:
+       return copied;
+}
+
+struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+                                 unsigned len,
+                                 struct page *page)
+{
+       int ret;
+       void *kaddr;
+       struct ext4_iloc iloc;
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret) {
+               ext4_std_error(inode->i_sb, ret);
+               return NULL;
+       }
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       kaddr = kmap_atomic(page);
+       ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
+       kunmap_atomic(kaddr);
+       up_write(&EXT4_I(inode)->xattr_sem);
+
+       return iloc.bh;
+}
+
+/*
+ * Try to make the page cache and handle ready for the inline data case.
+ * We can call this function in two cases:
+ * 1. The inode is created and the first write exceeds the inline size. We
+ *    can clear the inode state safely.
+ * 2. The inode has inline data, so we need to read the data, mark the page
+ *    uptodate and dirty so that ext4_da_writepages can handle it. We don't
+ *    need to start the journal since the file's metadata isn't changed now.
+ */
+static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
+                                                struct inode *inode,
+                                                unsigned flags,
+                                                void **fsdata)
+{
+       int ret = 0, inline_size;
+       struct page *page;
+
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page)
+               return -ENOMEM;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+               goto out;
+       }
+
+       inline_size = ext4_get_inline_size(inode);
+
+       if (!PageUptodate(page)) {
+               ret = ext4_read_inline_page(inode, page);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = __block_write_begin(page, 0, inline_size,
+                                 ext4_da_get_block_prep);
+       if (ret) {
+               ext4_truncate_failed_write(inode);
+               goto out;
+       }
+
+       SetPageDirty(page);
+       SetPageUptodate(page);
+       ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+       *fsdata = (void *)CONVERT_INLINE_DATA;
+
+out:
+       up_read(&EXT4_I(inode)->xattr_sem);
+       if (page) {
+               unlock_page(page);
+               page_cache_release(page);
+       }
+       return ret;
+}
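+
+/*
+ * The *fsdata handshake above, distilled: the begin hook hands an opaque
+ * token (CONVERT_INLINE_DATA in this patch) to the matching end hook,
+ * which then takes the block-based completion path.  CONVERTED_TOKEN is
+ * a stand-in value; only the comparison matters.
+ */
+#include <stdbool.h>
+
+#define CONVERTED_TOKEN ((void *)1)
+
+static void begin_hook(void **fsdata, bool converted)
+{
+        *fsdata = converted ? CONVERTED_TOKEN : NULL;
+}
+
+static bool end_hook_sees_conversion(void *fsdata)
+{
+        return fsdata == CONVERTED_TOKEN;
+}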
+
+/*
+ * Prepare the write for the inline data.
+ * If the data can be written into the inode, we just read
+ * the page and make it uptodate, and start the journal.
+ * Otherwise read the page, mark it dirty so that it can be
+ * handled by writepages (the i_disksize update is left to the
+ * normal ext4_da_write_end).
+ */
+int ext4_da_write_inline_data_begin(struct address_space *mapping,
+                                   struct inode *inode,
+                                   loff_t pos, unsigned len,
+                                   unsigned flags,
+                                   struct page **pagep,
+                                   void **fsdata)
+{
+       int ret, inline_size;
+       handle_t *handle;
+       struct page *page;
+       struct ext4_iloc iloc;
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               return ret;
+
+       handle = ext4_journal_start(inode, 1);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               handle = NULL;
+               goto out;
+       }
+
+       inline_size = ext4_get_max_inline_size(inode);
+
+       ret = -ENOSPC;
+       if (inline_size >= pos + len) {
+               ret = ext4_prepare_inline_data(handle, inode, pos + len);
+               if (ret && ret != -ENOSPC)
+                       goto out;
+       }
+
+       if (ret == -ENOSPC) {
+               ret = ext4_da_convert_inline_data_to_extent(mapping,
+                                                           inode,
+                                                           flags,
+                                                           fsdata);
+               goto out;
+       }
+
+       /*
+        * We cannot recurse into the filesystem as the transaction
+        * is already started.
+        */
+       flags |= AOP_FLAG_NOFS;
+
+       page = grab_cache_page_write_begin(mapping, 0, flags);
+       if (!page) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               ret = 0;
+               goto out_release_page;
+       }
+
+       if (!PageUptodate(page)) {
+               ret = ext4_read_inline_page(inode, page);
+               if (ret < 0)
+                       goto out_release_page;
+       }
+
+       up_read(&EXT4_I(inode)->xattr_sem);
+       *pagep = page;
+       handle = NULL;
+       brelse(iloc.bh);
+       return 1;
+out_release_page:
+       up_read(&EXT4_I(inode)->xattr_sem);
+       unlock_page(page);
+       page_cache_release(page);
+out:
+       if (handle)
+               ext4_journal_stop(handle);
+       brelse(iloc.bh);
+       return ret;
+}
+
+int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+                                 unsigned len, unsigned copied,
+                                 struct page *page)
+{
+       int i_size_changed = 0;
+
+       copied = ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+       /*
+        * No need to use i_size_read() here, the i_size
+        * cannot change under us because we hold i_mutex.
+        *
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+       if (pos+copied > inode->i_size) {
+               i_size_write(inode, pos+copied);
+               i_size_changed = 1;
+       }
+       unlock_page(page);
+       page_cache_release(page);
+
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               mark_inode_dirty(inode);
+
+       return copied;
+}
+
+#ifdef INLINE_DIR_DEBUG
+void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
+                         void *inline_start, int inline_size)
+{
+       int offset;
+       unsigned short de_len;
+       struct ext4_dir_entry_2 *de = inline_start;
+       void *dlimit = inline_start + inline_size;
+
+       trace_printk("inode %lu\n", dir->i_ino);
+       offset = 0;
+       while ((void *)de < dlimit) {
+               de_len = ext4_rec_len_from_disk(de->rec_len, inline_size);
+               trace_printk("de: off %u rlen %u name %.*s nlen %u ino %u\n",
+                            offset, de_len, de->name_len, de->name,
+                            de->name_len, le32_to_cpu(de->inode));
+               if (ext4_check_dir_entry(dir, NULL, de, bh,
+                                        inline_start, inline_size, offset))
+                       BUG();
+
+               offset += de_len;
+               de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
+       }
+}
+#else
+#define ext4_show_inline_dir(dir, bh, inline_start, inline_size)
+#endif
+
+/*
+ * Add a new entry into an inline dir.
+ * It will return -ENOSPC if no space is available, -EEXIST if the
+ * directory entry already exists, and -EIO on corruption.
+ */
+static int ext4_add_dirent_to_inline(handle_t *handle,
+                                    struct dentry *dentry,
+                                    struct inode *inode,
+                                    struct ext4_iloc *iloc,
+                                    void *inline_start, int inline_size)
+{
+       struct inode    *dir = dentry->d_parent->d_inode;
+       const char      *name = dentry->d_name.name;
+       int             namelen = dentry->d_name.len;
+       unsigned short  reclen;
+       int             err;
+       struct ext4_dir_entry_2 *de;
+
+       reclen = EXT4_DIR_REC_LEN(namelen);
+       err = ext4_find_dest_de(dir, inode, iloc->bh,
+                               inline_start, inline_size,
+                               name, namelen, &de);
+       if (err)
+               return err;
+
+       err = ext4_journal_get_write_access(handle, iloc->bh);
+       if (err)
+               return err;
+       ext4_insert_dentry(inode, de, inline_size, name, namelen);
+
+       ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
+
+       /*
+        * XXX shouldn't update any times until successful
+        * completion of syscall, but too many callers depend
+        * on this.
+        *
+        * XXX similarly, too many callers depend on
+        * ext4_new_inode() setting the times, but error
+        * recovery deletes the inode, so the worst that can
+        * happen is that the times are slightly out of date
+        * and/or different from the directory change time.
+        */
+       dir->i_mtime = dir->i_ctime = ext4_current_time(dir);
+       ext4_update_dx_flag(dir);
+       dir->i_version++;
+       ext4_mark_inode_dirty(handle, dir);
+       return 1;
+}
+
+static void *ext4_get_inline_xattr_pos(struct inode *inode,
+                                      struct ext4_iloc *iloc)
+{
+       struct ext4_xattr_entry *entry;
+       struct ext4_xattr_ibody_header *header;
+
+       BUG_ON(!EXT4_I(inode)->i_inline_off);
+
+       header = IHDR(inode, ext4_raw_inode(iloc));
+       entry = (struct ext4_xattr_entry *)((void *)ext4_raw_inode(iloc) +
+                                           EXT4_I(inode)->i_inline_off);
+
+       return (void *)IFIRST(header) + le16_to_cpu(entry->e_value_offs);
+}
+
+/* Set the final de to cover the whole block. */
+static void ext4_update_final_de(void *de_buf, int old_size, int new_size)
+{
+       struct ext4_dir_entry_2 *de, *prev_de;
+       void *limit;
+       int de_len;
+
+       de = (struct ext4_dir_entry_2 *)de_buf;
+       if (old_size) {
+               limit = de_buf + old_size;
+               do {
+                       prev_de = de;
+                       de_len = ext4_rec_len_from_disk(de->rec_len, old_size);
+                       de_buf += de_len;
+                       de = (struct ext4_dir_entry_2 *)de_buf;
+               } while (de_buf < limit);
+
+               prev_de->rec_len = ext4_rec_len_to_disk(de_len + new_size -
+                                                       old_size, new_size);
+       } else {
+               /* The dir was just created, so add one empty entry covering it. */
+               de->inode = 0;
+               de->rec_len = ext4_rec_len_to_disk(new_size, new_size);
+       }
+}
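+
+/*
+ * A user-space walk of the "final dirent absorbs the growth" rule above,
+ * using a simplified dirent that stores rec_len directly (no
+ * ext4_rec_len_{to,from}_disk encoding) and omitting the empty-dir
+ * (old_size == 0) case.
+ */
+#include <stdint.h>
+
+struct sdirent {
+        uint32_t inode;
+        uint16_t rec_len;
+};
+
+static void grow_final_dirent(void *buf, int old_size, int new_size)
+{
+        char *p = buf, *limit = (char *)buf + old_size;
+        struct sdirent *prev = NULL;
+
+        while (p < limit) {             /* walk to the last entry */
+                prev = (struct sdirent *)p;
+                p += prev->rec_len;
+        }
+        if (prev)
+                prev->rec_len += new_size - old_size;
+}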
+
+static int ext4_update_inline_dir(handle_t *handle, struct inode *dir,
+                                 struct ext4_iloc *iloc)
+{
+       int ret;
+       int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
+       int new_size = get_max_inline_xattr_value_size(dir, iloc);
+
+       if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
+               return -ENOSPC;
+
+       ret = ext4_update_inline_data(handle, dir,
+                                     new_size + EXT4_MIN_INLINE_DATA_SIZE);
+       if (ret)
+               return ret;
+
+       ext4_update_final_de(ext4_get_inline_xattr_pos(dir, iloc), old_size,
+                            EXT4_I(dir)->i_inline_size -
+                                               EXT4_MIN_INLINE_DATA_SIZE);
+       dir->i_size = EXT4_I(dir)->i_disksize = EXT4_I(dir)->i_inline_size;
+       return 0;
+}
+
+static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
+                                    struct ext4_iloc *iloc,
+                                    void *buf, int inline_size)
+{
+       ext4_create_inline_data(handle, inode, inline_size);
+       ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
+       ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+}
+
+static int ext4_finish_convert_inline_dir(handle_t *handle,
+                                         struct inode *inode,
+                                         struct buffer_head *dir_block,
+                                         void *buf,
+                                         int inline_size)
+{
+       int err, csum_size = 0, header_size = 0;
+       struct ext4_dir_entry_2 *de;
+       struct ext4_dir_entry_tail *t;
+       void *target = dir_block->b_data;
+
+       /*
+        * First create "." and ".." and then copy the dir information
+        * back to the block.
+        */
+       de = (struct ext4_dir_entry_2 *)target;
+       de = ext4_init_dot_dotdot(inode, de,
+               inode->i_sb->s_blocksize, csum_size,
+               le32_to_cpu(((struct ext4_dir_entry_2 *)buf)->inode), 1);
+       header_size = (void *)de - target;
+
+       memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
+               inline_size - EXT4_INLINE_DOTDOT_SIZE);
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
+       inode->i_size = inode->i_sb->s_blocksize;
+       i_size_write(inode, inode->i_sb->s_blocksize);
+       EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
+       ext4_update_final_de(dir_block->b_data,
+                       inline_size - EXT4_INLINE_DOTDOT_SIZE + header_size,
+                       inode->i_sb->s_blocksize - csum_size);
+
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(dir_block->b_data,
+                                    inode->i_sb->s_blocksize);
+               initialize_dirent_tail(t, inode->i_sb->s_blocksize);
+       }
+       set_buffer_uptodate(dir_block);
+       err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+       if (err)
+               goto out;
+       set_buffer_verified(dir_block);
+out:
+       return err;
+}
+
+static int ext4_convert_inline_data_nolock(handle_t *handle,
+                                          struct inode *inode,
+                                          struct ext4_iloc *iloc)
+{
+       int error;
+       void *buf = NULL;
+       struct buffer_head *data_bh = NULL;
+       struct ext4_map_blocks map;
+       int inline_size;
+
+       inline_size = ext4_get_inline_size(inode);
+       buf = kmalloc(inline_size, GFP_NOFS);
+       if (!buf) {
+               error = -ENOMEM;
+               goto out;
+       }
+
+       error = ext4_read_inline_data(inode, buf, inline_size, iloc);
+       if (error < 0)
+               goto out;
+
+       error = ext4_destroy_inline_data_nolock(handle, inode);
+       if (error)
+               goto out;
+
+       map.m_lblk = 0;
+       map.m_len = 1;
+       map.m_flags = 0;
+       error = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
+       if (error < 0)
+               goto out_restore;
+       if (!(map.m_flags & EXT4_MAP_MAPPED)) {
+               error = -EIO;
+               goto out_restore;
+       }
+
+       data_bh = sb_getblk(inode->i_sb, map.m_pblk);
+       if (!data_bh) {
+               error = -EIO;
+               goto out_restore;
+       }
+
+       lock_buffer(data_bh);
+       error = ext4_journal_get_create_access(handle, data_bh);
+       if (error) {
+               unlock_buffer(data_bh);
+               error = -EIO;
+               goto out_restore;
+       }
+       memset(data_bh->b_data, 0, inode->i_sb->s_blocksize);
+
+       if (!S_ISDIR(inode->i_mode)) {
+               memcpy(data_bh->b_data, buf, inline_size);
+               set_buffer_uptodate(data_bh);
+               error = ext4_handle_dirty_metadata(handle,
+                                                  inode, data_bh);
+       } else {
+               error = ext4_finish_convert_inline_dir(handle, inode, data_bh,
+                                                      buf, inline_size);
+       }
+
+       unlock_buffer(data_bh);
+out_restore:
+       if (error)
+               ext4_restore_inline_data(handle, inode, iloc, buf, inline_size);
+
+out:
+       brelse(data_bh);
+       kfree(buf);
+       return error;
+}
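+
+/*
+ * The ordering above matters: the inline bytes are copied to a private
+ * buffer before the in-inode copy is destroyed, so any later failure can
+ * restore them.  A compressed restatement with stub helpers (all four
+ * function names below are illustrative, not kernel APIs):
+ */
+static int read_inline(char *buf) { (void)buf; return 0; }
+static int destroy_inline(void) { return 0; }
+static int alloc_block_and_copy(const char *buf) { (void)buf; return 0; }
+static void restore_inline(const char *buf) { (void)buf; }
+
+static int convert_sketch(void)
+{
+        char buf[60];
+        int err;
+
+        err = read_inline(buf);         /* snapshot first */
+        if (err < 0)
+                return err;
+        err = destroy_inline();
+        if (err < 0)
+                return err;
+        err = alloc_block_and_copy(buf);
+        if (err < 0)
+                restore_inline(buf);    /* undo: put the bytes back */
+        return err;
+}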
+
+/*
+ * Try to add the new entry to the inline data.
+ * If it succeeds, return 0. If not, extend the inline dir into the xattr
+ * space and, when that is full too, copy the data out to a newly created
+ * block.
+ */
+int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+                             struct inode *inode)
+{
+       int ret, inline_size;
+       void *inline_start;
+       struct ext4_iloc iloc;
+       struct inode *dir = dentry->d_parent->d_inode;
+
+       ret = ext4_get_inode_loc(dir, &iloc);
+       if (ret)
+               return ret;
+
+       down_write(&EXT4_I(dir)->xattr_sem);
+       if (!ext4_has_inline_data(dir))
+               goto out;
+
+       inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+                                                EXT4_INLINE_DOTDOT_SIZE;
+       inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
+
+       ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
+                                       inline_start, inline_size);
+       if (ret != -ENOSPC)
+               goto out;
+
+       /* Check whether it can be inserted into the inline xattr space. */
+       inline_size = EXT4_I(dir)->i_inline_size -
+                       EXT4_MIN_INLINE_DATA_SIZE;
+       if (!inline_size) {
+               /* Try to use the xattr space. */
+               ret = ext4_update_inline_dir(handle, dir, &iloc);
+               if (ret && ret != -ENOSPC)
+                       goto out;
+
+               inline_size = EXT4_I(dir)->i_inline_size -
+                               EXT4_MIN_INLINE_DATA_SIZE;
+       }
+
+       if (inline_size) {
+               inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+
+               ret = ext4_add_dirent_to_inline(handle, dentry, inode, &iloc,
+                                               inline_start, inline_size);
+
+               if (ret != -ENOSPC)
+                       goto out;
+       }
+
+       /*
+        * The inline space is filled up, so create a new block for it.
+        * As the extent tree will be created, we have to save the inline
+        * dir first.
+        */
+       ret = ext4_convert_inline_data_nolock(handle, dir, &iloc);
+
+out:
+       ext4_mark_inode_dirty(handle, dir);
+       up_write(&EXT4_I(dir)->xattr_sem);
+       brelse(iloc.bh);
+       return ret;
+}
+
+int ext4_read_inline_dir(struct file *filp,
+                        void *dirent, filldir_t filldir,
+                        int *has_inline_data)
+{
+       int error = 0;
+       unsigned int offset, parent_ino;
+       int i, stored;
+       struct ext4_dir_entry_2 *de;
+       struct super_block *sb;
+       struct inode *inode = filp->f_path.dentry->d_inode;
+       int ret, inline_size = 0;
+       struct ext4_iloc iloc;
+       void *dir_buf = NULL;
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               return ret;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               up_read(&EXT4_I(inode)->xattr_sem);
+               *has_inline_data = 0;
+               goto out;
+       }
+
+       inline_size = ext4_get_inline_size(inode);
+       dir_buf = kmalloc(inline_size, GFP_NOFS);
+       if (!dir_buf) {
+               ret = -ENOMEM;
+               up_read(&EXT4_I(inode)->xattr_sem);
+               goto out;
+       }
+
+       ret = ext4_read_inline_data(inode, dir_buf, inline_size, &iloc);
+       up_read(&EXT4_I(inode)->xattr_sem);
+       if (ret < 0)
+               goto out;
+
+       sb = inode->i_sb;
+       stored = 0;
+       parent_ino = le32_to_cpu(((struct ext4_dir_entry_2 *)dir_buf)->inode);
+
+       while (!error && !stored && filp->f_pos < inode->i_size) {
+revalidate:
+               /*
+                * If the version has changed since the last call to
+                * readdir(2), then we might be pointing to an invalid
+                * dirent right now.  Scan from the start of the inline
+                * dir to make sure.
+                */
+               if (filp->f_version != inode->i_version) {
+                       for (i = 0;
+                            i < inode->i_size && i < offset;) {
+                               if (!i) {
+                                       /* skip "." and ".." if needed. */
+                                       i += EXT4_INLINE_DOTDOT_SIZE;
+                                       continue;
+                               }
+                               de = (struct ext4_dir_entry_2 *)
+                                       (dir_buf + i);
+                               /* It's too expensive to do a full
+                                * dirent test each time round this
+                                * loop, but we do have to test at
+                                * least that it is non-zero.  A
+                                * failure will be detected in the
+                                * dirent test below. */
+                               if (ext4_rec_len_from_disk(de->rec_len,
+                                       inline_size) < EXT4_DIR_REC_LEN(1))
+                                       break;
+                               i += ext4_rec_len_from_disk(de->rec_len,
+                                                           inline_size);
+                       }
+                       offset = i;
+                       filp->f_pos = offset;
+                       filp->f_version = inode->i_version;
+               }
+
+               while (!error && filp->f_pos < inode->i_size) {
+                       if (filp->f_pos == 0) {
+                               error = filldir(dirent, ".", 1, 0, inode->i_ino,
+                                               DT_DIR);
+                               if (error)
+                                       break;
+                               stored++;
+
+                               error = filldir(dirent, "..", 2, 0, parent_ino,
+                                               DT_DIR);
+                               if (error)
+                                       break;
+                               stored++;
+
+                               filp->f_pos = offset = EXT4_INLINE_DOTDOT_SIZE;
+                               continue;
+                       }
+
+                       de = (struct ext4_dir_entry_2 *)(dir_buf + offset);
+                       if (ext4_check_dir_entry(inode, filp, de,
+                                                iloc.bh, dir_buf,
+                                                inline_size, offset)) {
+                               ret = stored;
+                               goto out;
+                       }
+                       offset += ext4_rec_len_from_disk(de->rec_len,
+                                                        inline_size);
+                       if (le32_to_cpu(de->inode)) {
+                               /* We might block in the next section
+                                * if the data destination is
+                                * currently swapped out.  So, use a
+                                * version stamp to detect whether or
+                                * not the directory has been modified
+                                * during the copy operation.
+                                */
+                               u64 version = filp->f_version;
+
+                               error = filldir(dirent, de->name,
+                                               de->name_len,
+                                               filp->f_pos,
+                                               le32_to_cpu(de->inode),
+                                               get_dtype(sb, de->file_type));
+                               if (error)
+                                       break;
+                               if (version != filp->f_version)
+                                       goto revalidate;
+                               stored++;
+                       }
+                       filp->f_pos += ext4_rec_len_from_disk(de->rec_len,
+                                                             inline_size);
+               }
+               offset = 0;
+       }
+out:
+       kfree(dir_buf);
+       brelse(iloc.bh);
+       return ret;
+}
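+
+/*
+ * Inline dirs store no "." or ".." dirents, only the parent inode number
+ * in the first 4 bytes (EXT4_INLINE_DOTDOT_SIZE), so readdir fabricates
+ * both entries before walking the real ones.  A user-space sketch; emit()
+ * stands in for filldir, and the little-endian decode is simplified.
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define INLINE_DOTDOT_SIZE 4
+
+static void emit(const char *name, uint32_t ino)
+{
+        printf("%s -> inode %u\n", name, ino);
+}
+
+static void list_inline_dir(const uint8_t *dir_buf, uint32_t self_ino)
+{
+        uint32_t parent_ino;
+
+        memcpy(&parent_ino, dir_buf, sizeof(parent_ino));
+        emit(".", self_ino);
+        emit("..", parent_ino);
+        /* real dirents start at dir_buf + INLINE_DOTDOT_SIZE */
+}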
+
+struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+                                       struct ext4_dir_entry_2 **parent_de,
+                                       int *retval)
+{
+       struct ext4_iloc iloc;
+
+       *retval = ext4_get_inode_loc(inode, &iloc);
+       if (*retval)
+               return NULL;
+
+       *parent_de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+
+       return iloc.bh;
+}
+
+/*
+ * Try to create the inline data for the new dir.
+ * If it succeeds, return 0, otherwise return the error.
+ * In case of ENOSPC, the caller should create the normal disk layout dir.
+ */
+int ext4_try_create_inline_dir(handle_t *handle, struct inode *parent,
+                              struct inode *inode)
+{
+       int ret, inline_size = EXT4_MIN_INLINE_DATA_SIZE;
+       struct ext4_iloc iloc;
+       struct ext4_dir_entry_2 *de;
+
+       ret = ext4_get_inode_loc(inode, &iloc);
+       if (ret)
+               return ret;
+
+       ret = ext4_prepare_inline_data(handle, inode, inline_size);
+       if (ret)
+               goto out;
+
+       /*
+        * For an inline dir, we only save the inode number of ".."
+        * and create a fake dentry to cover the remaining space.
+        */
+       de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+       de->inode = cpu_to_le32(parent->i_ino);
+       de = (struct ext4_dir_entry_2 *)((void *)de + EXT4_INLINE_DOTDOT_SIZE);
+       de->inode = 0;
+       de->rec_len = ext4_rec_len_to_disk(
+                               inline_size - EXT4_INLINE_DOTDOT_SIZE,
+                               inline_size);
+       set_nlink(inode, 2);
+       inode->i_size = EXT4_I(inode)->i_disksize = inline_size;
+out:
+       brelse(iloc.bh);
+       return ret;
+}
+
+struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+                                       const struct qstr *d_name,
+                                       struct ext4_dir_entry_2 **res_dir,
+                                       int *has_inline_data)
+{
+       int ret;
+       struct ext4_iloc iloc;
+       void *inline_start;
+       int inline_size;
+
+       if (ext4_get_inode_loc(dir, &iloc))
+               return NULL;
+
+       down_read(&EXT4_I(dir)->xattr_sem);
+       if (!ext4_has_inline_data(dir)) {
+               *has_inline_data = 0;
+               goto out;
+       }
+
+       inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+                                               EXT4_INLINE_DOTDOT_SIZE;
+       inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
+       ret = search_dir(iloc.bh, inline_start, inline_size,
+                        dir, d_name, 0, res_dir);
+       if (ret == 1)
+               goto out_find;
+       if (ret < 0)
+               goto out;
+
+       if (ext4_get_inline_size(dir) == EXT4_MIN_INLINE_DATA_SIZE)
+               goto out;
+
+       inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+       inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
+
+       ret = search_dir(iloc.bh, inline_start, inline_size,
+                        dir, d_name, 0, res_dir);
+       if (ret == 1)
+               goto out_find;
+
+out:
+       brelse(iloc.bh);
+       iloc.bh = NULL;
+out_find:
+       up_read(&EXT4_I(dir)->xattr_sem);
+       return iloc.bh;
+}
+
+int ext4_delete_inline_entry(handle_t *handle,
+                            struct inode *dir,
+                            struct ext4_dir_entry_2 *de_del,
+                            struct buffer_head *bh,
+                            int *has_inline_data)
+{
+       int err, inline_size;
+       struct ext4_iloc iloc;
+       void *inline_start;
+
+       err = ext4_get_inode_loc(dir, &iloc);
+       if (err)
+               return err;
+
+       down_write(&EXT4_I(dir)->xattr_sem);
+       if (!ext4_has_inline_data(dir)) {
+               *has_inline_data = 0;
+               goto out;
+       }
+
+       if ((void *)de_del - ((void *)ext4_raw_inode(&iloc)->i_block) <
+               EXT4_MIN_INLINE_DATA_SIZE) {
+               inline_start = (void *)ext4_raw_inode(&iloc)->i_block +
+                                       EXT4_INLINE_DOTDOT_SIZE;
+               inline_size = EXT4_MIN_INLINE_DATA_SIZE -
+                               EXT4_INLINE_DOTDOT_SIZE;
+       } else {
+               inline_start = ext4_get_inline_xattr_pos(dir, &iloc);
+               inline_size = ext4_get_inline_size(dir) -
+                               EXT4_MIN_INLINE_DATA_SIZE;
+       }
+
+       err = ext4_journal_get_write_access(handle, bh);
+       if (err)
+               goto out;
+
+       err = ext4_generic_delete_entry(handle, dir, de_del, bh,
+                                       inline_start, inline_size, 0);
+       if (err)
+               goto out;
+
+       BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+       err = ext4_mark_inode_dirty(handle, dir);
+       if (unlikely(err))
+               goto out;
+
+       ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
+out:
+       up_write(&EXT4_I(dir)->xattr_sem);
+       brelse(iloc.bh);
+       if (err != -ENOENT)
+               ext4_std_error(dir->i_sb, err);
+       return err;
+}
+
+/*
+ * Get the inline dentry at offset.
+ */
+static inline struct ext4_dir_entry_2 *
+ext4_get_inline_entry(struct inode *inode,
+                     struct ext4_iloc *iloc,
+                     unsigned int offset,
+                     void **inline_start,
+                     int *inline_size)
+{
+       void *inline_pos;
+
+       BUG_ON(offset > ext4_get_inline_size(inode));
+
+       if (offset < EXT4_MIN_INLINE_DATA_SIZE) {
+               inline_pos = (void *)ext4_raw_inode(iloc)->i_block;
+               *inline_size = EXT4_MIN_INLINE_DATA_SIZE;
+       } else {
+               inline_pos = ext4_get_inline_xattr_pos(inode, iloc);
+               offset -= EXT4_MIN_INLINE_DATA_SIZE;
+               *inline_size = ext4_get_inline_size(inode) -
+                               EXT4_MIN_INLINE_DATA_SIZE;
+       }
+
+       if (inline_start)
+               *inline_start = inline_pos;
+       return (struct ext4_dir_entry_2 *)(inline_pos + offset);
+}
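+
+/*
+ * The offset split above in isolation: logical dir offsets below 60
+ * bytes land in i_block, anything past that is rebased into the xattr
+ * region.  A self-contained sketch with the 60-byte constant inlined.
+ */
+static const void *locate_inline(const void *iblock, const void *xattr_pos,
+                                 unsigned int off)
+{
+        if (off < 60)
+                return (const char *)iblock + off;
+        return (const char *)xattr_pos + (off - 60);
+}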
+
+int empty_inline_dir(struct inode *dir, int *has_inline_data)
+{
+       int err, inline_size;
+       struct ext4_iloc iloc;
+       void *inline_pos;
+       unsigned int offset;
+       struct ext4_dir_entry_2 *de;
+       int ret = 1;
+
+       err = ext4_get_inode_loc(dir, &iloc);
+       if (err) {
+               EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
+                                err, dir->i_ino);
+               return 1;
+       }
+
+       down_read(&EXT4_I(dir)->xattr_sem);
+       if (!ext4_has_inline_data(dir)) {
+               *has_inline_data = 0;
+               goto out;
+       }
+
+       de = (struct ext4_dir_entry_2 *)ext4_raw_inode(&iloc)->i_block;
+       if (!le32_to_cpu(de->inode)) {
+               ext4_warning(dir->i_sb,
+                            "bad inline directory (dir #%lu) - no `..'",
+                            dir->i_ino);
+               ret = 1;
+               goto out;
+       }
+
+       offset = EXT4_INLINE_DOTDOT_SIZE;
+       while (offset < dir->i_size) {
+               de = ext4_get_inline_entry(dir, &iloc, offset,
+                                          &inline_pos, &inline_size);
+               if (ext4_check_dir_entry(dir, NULL, de,
+                                        iloc.bh, inline_pos,
+                                        inline_size, offset)) {
+                       ext4_warning(dir->i_sb,
+                                    "bad inline directory (dir #%lu) - "
+                                    "inode %u, rec_len %u, name_len %d, "
+                                    "inline size %d\n",
+                                    dir->i_ino, le32_to_cpu(de->inode),
+                                    le16_to_cpu(de->rec_len), de->name_len,
+                                    inline_size);
+                       ret = 1;
+                       goto out;
+               }
+               if (le32_to_cpu(de->inode)) {
+                       ret = 0;
+                       goto out;
+               }
+               offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
+       }
+
+out:
+       up_read(&EXT4_I(dir)->xattr_sem);
+       brelse(iloc.bh);
+       return ret;
+}
+
+int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
+{
+       int ret;
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       ret = ext4_destroy_inline_data_nolock(handle, inode);
+       up_write(&EXT4_I(inode)->xattr_sem);
+
+       return ret;
+}
+
+int ext4_inline_data_fiemap(struct inode *inode,
+                           struct fiemap_extent_info *fieinfo,
+                           int *has_inline)
+{
+       __u64 physical = 0;
+       __u64 length;
+       __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST;
+       int error = 0;
+       struct ext4_iloc iloc;
+
+       down_read(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               *has_inline = 0;
+               goto out;
+       }
+
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error)
+               goto out;
+
+       physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
+       physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
+       physical += offsetof(struct ext4_inode, i_block);
+       length = i_size_read(inode);
+
+       if (physical)
+               error = fiemap_fill_next_extent(fieinfo, 0, physical,
+                                               length, flags);
+       brelse(iloc.bh);
+out:
+       up_read(&EXT4_I(inode)->xattr_sem);
+       return (error < 0 ? error : 0);
+}
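+
+/*
+ * The physical offset reported to fiemap above, recomputed in isolation:
+ * the byte address of this inode's i_block field inside the inode table.
+ * Parameter names are illustrative; i_block_off corresponds to
+ * offsetof(struct ext4_inode, i_block).
+ */
+#include <stdint.h>
+#include <stddef.h>
+
+static uint64_t inline_phys(uint64_t itable_block, unsigned int blkbits,
+                            size_t inode_off, size_t i_block_off)
+{
+        uint64_t phys = itable_block << blkbits; /* block nr -> bytes */
+
+        phys += inode_off;      /* this inode within its table block */
+        phys += i_block_off;    /* the i_block field within the inode */
+        return phys;
+}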
+
+/*
+ * Called during an xattr set: if evicting the inline data can free at
+ * least the 'needed' space, create the extent tree and evict the data
+ * to an external block.
+ *
+ * We use jbd2 instead of the page cache to move the data to the first
+ * block so that the whole transaction can be committed atomically and
+ * the data isn't lost because of a delayed page cache writeback.
+ */
+int ext4_try_to_evict_inline_data(handle_t *handle,
+                                 struct inode *inode,
+                                 int needed)
+{
+       int error;
+       struct ext4_xattr_entry *entry;
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_inode *raw_inode;
+       struct ext4_iloc iloc;
+
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error)
+               return error;
+
+       raw_inode = ext4_raw_inode(&iloc);
+       header = IHDR(inode, raw_inode);
+       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
+                                           EXT4_I(inode)->i_inline_off);
+       if (EXT4_XATTR_LEN(entry->e_name_len) +
+           EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
+               error = -ENOSPC;
+               goto out;
+       }
+
+       error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+out:
+       brelse(iloc.bh);
+       return error;
+}
+
+void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+{
+       handle_t *handle;
+       int inline_size, value_len, needed_blocks;
+       size_t i_size;
+       void *value = NULL;
+       struct ext4_xattr_ibody_find is = {
+               .s = { .not_found = -ENODATA, },
+       };
+       struct ext4_xattr_info i = {
+               .name_index = EXT4_XATTR_INDEX_SYSTEM,
+               .name = EXT4_XATTR_SYSTEM_DATA,
+       };
+
+       needed_blocks = ext4_writepage_trans_blocks(inode);
+       handle = ext4_journal_start(inode, needed_blocks);
+       if (IS_ERR(handle))
+               return;
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               *has_inline = 0;
+               ext4_journal_stop(handle);
+               return;
+       }
+
+       if (ext4_orphan_add(handle, inode))
+               goto out;
+
+       if (ext4_get_inode_loc(inode, &is.iloc))
+               goto out;
+
+       down_write(&EXT4_I(inode)->i_data_sem);
+       i_size = inode->i_size;
+       inline_size = ext4_get_inline_size(inode);
+       EXT4_I(inode)->i_disksize = i_size;
+
+       if (i_size < inline_size) {
+               /* Clear the content in the xattr space. */
+               if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
+                       if (ext4_xattr_ibody_find(inode, &i, &is))
+                               goto out_error;
+
+                       BUG_ON(is.s.not_found);
+
+                       value_len = le32_to_cpu(is.s.here->e_value_size);
+                       value = kmalloc(value_len, GFP_NOFS);
+                       if (!value)
+                               goto out_error;
+
+                       if (ext4_xattr_ibody_get(inode, i.name_index, i.name,
+                                               value, value_len))
+                               goto out_error;
+
+                       i.value = value;
+                       i.value_len = i_size > EXT4_MIN_INLINE_DATA_SIZE ?
+                                       i_size - EXT4_MIN_INLINE_DATA_SIZE : 0;
+                       if (ext4_xattr_ibody_inline_set(handle, inode, &i, &is))
+                               goto out_error;
+               }
+
+               /* Clear the content within i_block. */
+               if (i_size < EXT4_MIN_INLINE_DATA_SIZE)
+                       memset(ext4_raw_inode(&is.iloc)->i_block + i_size, 0,
+                                       EXT4_MIN_INLINE_DATA_SIZE - i_size);
+
+               EXT4_I(inode)->i_inline_size = i_size <
+                                       EXT4_MIN_INLINE_DATA_SIZE ?
+                                       EXT4_MIN_INLINE_DATA_SIZE : i_size;
+       }
+
+out_error:
+       up_write(&EXT4_I(inode)->i_data_sem);
+out:
+       brelse(is.iloc.bh);
+       up_write(&EXT4_I(inode)->xattr_sem);
+       kfree(value);
+       if (inode->i_nlink)
+               ext4_orphan_del(handle, inode);
+
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+
+       ext4_journal_stop(handle);
+       return;
+}
+
+int ext4_convert_inline_data(struct inode *inode)
+{
+       int error, needed_blocks;
+       handle_t *handle;
+       struct ext4_iloc iloc;
+
+       if (!ext4_has_inline_data(inode)) {
+               ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
+               return 0;
+       }
+
+       needed_blocks = ext4_writepage_trans_blocks(inode);
+
+       iloc.bh = NULL;
+       error = ext4_get_inode_loc(inode, &iloc);
+       if (error)
+               return error;
+
+       handle = ext4_journal_start(inode, needed_blocks);
+       if (IS_ERR(handle)) {
+               error = PTR_ERR(handle);
+               goto out_free;
+       }
+
+       down_write(&EXT4_I(inode)->xattr_sem);
+       if (!ext4_has_inline_data(inode)) {
+               up_write(&EXT4_I(inode)->xattr_sem);
+               goto out;
+       }
+
+       error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+       up_write(&EXT4_I(inode)->xattr_sem);
+out:
+       ext4_journal_stop(handle);
+out_free:
+       brelse(iloc.bh);
+       return error;
+}
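+
+/*
+ * The check/lock/re-check pattern used throughout this file, distilled
+ * with a pthread rwlock standing in for xattr_sem: the cheap flag test
+ * runs unlocked and is repeated under the lock, because another writer
+ * may have converted the inode in between.
+ */
+#include <pthread.h>
+#include <stdbool.h>
+
+static pthread_rwlock_t sketch_sem = PTHREAD_RWLOCK_INITIALIZER;
+static bool sketch_has_inline = true;
+
+static void convert_once(void)
+{
+        if (!sketch_has_inline)                 /* unlocked fast path */
+                return;
+        pthread_rwlock_wrlock(&sketch_sem);
+        if (sketch_has_inline) {                /* re-check under the lock */
+                /* ... perform the conversion ... */
+                sketch_has_inline = false;
+        }
+        pthread_rwlock_unlock(&sketch_sem);
+}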
index b3c243b..cb1c1ab 100644 (file)
@@ -484,49 +484,6 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
 }
 
 /*
- * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
- */
-static void set_buffers_da_mapped(struct inode *inode,
-                                  struct ext4_map_blocks *map)
-{
-       struct address_space *mapping = inode->i_mapping;
-       struct pagevec pvec;
-       int i, nr_pages;
-       pgoff_t index, end;
-
-       index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
-       end = (map->m_lblk + map->m_len - 1) >>
-               (PAGE_CACHE_SHIFT - inode->i_blkbits);
-
-       pagevec_init(&pvec, 0);
-       while (index <= end) {
-               nr_pages = pagevec_lookup(&pvec, mapping, index,
-                                         min(end - index + 1,
-                                             (pgoff_t)PAGEVEC_SIZE));
-               if (nr_pages == 0)
-                       break;
-               for (i = 0; i < nr_pages; i++) {
-                       struct page *page = pvec.pages[i];
-                       struct buffer_head *bh, *head;
-
-                       if (unlikely(page->mapping != mapping) ||
-                           !PageDirty(page))
-                               break;
-
-                       if (page_has_buffers(page)) {
-                               bh = head = page_buffers(page);
-                               do {
-                                       set_buffer_da_mapped(bh);
-                                       bh = bh->b_this_page;
-                               } while (bh != head);
-                       }
-                       index++;
-               }
-               pagevec_release(&pvec);
-       }
-}
-
-/*
  * The ext4_map_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
  *
@@ -574,7 +531,16 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                up_read((&EXT4_I(inode)->i_data_sem));
 
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-               int ret = check_block_validity(inode, map);
+               int ret;
+               if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+                       /* Delayed allocation blocks may be allocated by
+                        * fallocate and converted to initialized by
+                        * direct I/O.  We need to handle the delayed
+                        * extent here.
+                        */
+                       down_write((&EXT4_I(inode)->i_data_sem));
+                       goto delayed_mapped;
+               }
+               ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
@@ -652,12 +618,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
                ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
-               /* If we have successfully mapped the delayed allocated blocks,
-                * set the BH_Da_Mapped bit on them. Its important to do this
-                * under the protection of i_data_sem.
-                */
-               if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
-                       set_buffers_da_mapped(inode, map);
+               if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+                       int ret;
+delayed_mapped:
+                       /* delayed allocation blocks have been allocated */
+                       ret = ext4_es_remove_extent(inode, map->m_lblk,
+                                                   map->m_len);
+                       if (ret < 0)
+                               retval = ret;
+               }
        }
 
        up_write((&EXT4_I(inode)->i_data_sem));
@@ -680,10 +649,13 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
        int ret = 0, started = 0;
        int dio_credits;
 
+       if (ext4_has_inline_data(inode))
+               return -ERANGE;
+
        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;
 
-       if (flags && !handle) {
+       if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
                /* Direct IO write... */
                if (map.m_len > DIO_MAX_BLOCKS)
                        map.m_len = DIO_MAX_BLOCKS;
@@ -798,13 +770,13 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
        return NULL;
 }
 
-static int walk_page_buffers(handle_t *handle,
-                            struct buffer_head *head,
-                            unsigned from,
-                            unsigned to,
-                            int *partial,
-                            int (*fn)(handle_t *handle,
-                                      struct buffer_head *bh))
+int ext4_walk_page_buffers(handle_t *handle,
+                          struct buffer_head *head,
+                          unsigned from,
+                          unsigned to,
+                          int *partial,
+                          int (*fn)(handle_t *handle,
+                                    struct buffer_head *bh))
 {
        struct buffer_head *bh;
        unsigned block_start, block_end;
@@ -854,8 +826,8 @@ static int walk_page_buffers(handle_t *handle,
  * is elevated.  We'll still have enough credits for the tiny quotafile
  * write.
  */
-static int do_journal_get_write_access(handle_t *handle,
-                                      struct buffer_head *bh)
+int do_journal_get_write_access(handle_t *handle,
+                               struct buffer_head *bh)
 {
        int dirty = buffer_dirty(bh);
        int ret;
@@ -878,7 +850,7 @@ static int do_journal_get_write_access(handle_t *handle,
        return ret;
 }
 
-static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
@@ -902,6 +874,17 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
+       if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+               ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
+                                                   flags, pagep);
+               if (ret < 0)
+                       goto out;
+               if (ret == 1) {
+                       ret = 0;
+                       goto out;
+               }
+       }
+
 retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
@@ -919,6 +902,7 @@ retry:
                ret = -ENOMEM;
                goto out;
        }
+
        *pagep = page;
 
        if (ext4_should_dioread_nolock(inode))
@@ -927,8 +911,9 @@ retry:
                ret = __block_write_begin(page, pos, len, ext4_get_block);
 
        if (!ret && ext4_should_journal_data(inode)) {
-               ret = walk_page_buffers(handle, page_buffers(page),
-                               from, to, NULL, do_journal_get_write_access);
+               ret = ext4_walk_page_buffers(handle, page_buffers(page),
+                                            from, to, NULL,
+                                            do_journal_get_write_access);
        }
 
        if (ret) {
@@ -983,7 +968,12 @@ static int ext4_generic_write_end(struct file *file,
        struct inode *inode = mapping->host;
        handle_t *handle = ext4_journal_current_handle();
 
-       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+       if (ext4_has_inline_data(inode))
+               copied = ext4_write_inline_data_end(inode, pos, len,
+                                                   copied, page);
+       else
+               copied = block_write_end(file, mapping, pos,
+                                        len, copied, page, fsdata);
 
        /*
         * No need to use i_size_read() here, the i_size
@@ -1134,16 +1124,21 @@ static int ext4_journalled_write_end(struct file *file,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       if (copied < len) {
-               if (!PageUptodate(page))
-                       copied = 0;
-               page_zero_new_buffers(page, from+copied, to);
-       }
+       if (ext4_has_inline_data(inode))
+               copied = ext4_write_inline_data_end(inode, pos, len,
+                                                   copied, page);
+       else {
+               if (copied < len) {
+                       if (!PageUptodate(page))
+                               copied = 0;
+                       page_zero_new_buffers(page, from+copied, to);
+               }
 
-       ret = walk_page_buffers(handle, page_buffers(page), from,
-                               to, &partial, write_end_fn);
-       if (!partial)
-               SetPageUptodate(page);
+               ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
+                                            to, &partial, write_end_fn);
+               if (!partial)
+                       SetPageUptodate(page);
+       }
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);
@@ -1301,6 +1296,7 @@ static void ext4_da_page_release_reservation(struct page *page,
        struct inode *inode = page->mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int num_clusters;
+       ext4_fsblk_t lblk;
 
        head = page_buffers(page);
        bh = head;
@@ -1310,20 +1306,23 @@ static void ext4_da_page_release_reservation(struct page *page,
                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
                        clear_buffer_delay(bh);
-                       clear_buffer_da_mapped(bh);
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);
 
+       if (to_release) {
+               lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+               ext4_es_remove_extent(inode, lblk, to_release);
+       }
+
        /* If we have released all the blocks belonging to a cluster, then we
         * need to release the reserved space for that cluster. */
        num_clusters = EXT4_NUM_B2C(sbi, to_release);
        while (num_clusters > 0) {
-               ext4_fsblk_t lblk;
                lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
                        ((num_clusters - 1) << sbi->s_cluster_bits);
                if (sbi->s_cluster_ratio == 1 ||
-                   !ext4_find_delalloc_cluster(inode, lblk, 1))
+                   !ext4_find_delalloc_cluster(inode, lblk))
                        ext4_da_release_space(inode, 1);
 
                num_clusters--;
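
The lblk computations above are plain unit conversions between page index and logical block number; with 4K pages and 1K blocks, each page covers four blocks. A compilable sketch of the arithmetic (the shift values are assumptions for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;   /* 4K pages (assumed) */
        unsigned int blkbits = 10;      /* 1K blocks (assumed) */
        unsigned long page_index = 5;

        /* page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits) */
        unsigned long lblk = page_index << (page_shift - blkbits);
        unsigned long per_page = 1UL << (page_shift - blkbits);

        printf("page %lu covers logical blocks %lu..%lu\n",
               page_index, lblk, lblk + per_page - 1);
        return 0;
    }
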
@@ -1429,8 +1428,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                                clear_buffer_delay(bh);
                                                bh->b_blocknr = pblock;
                                        }
-                                       if (buffer_da_mapped(bh))
-                                               clear_buffer_da_mapped(bh);
                                        if (buffer_unwritten(bh) ||
                                            buffer_mapped(bh))
                                                BUG_ON(bh->b_blocknr != pblock);
@@ -1500,9 +1497,16 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
        struct pagevec pvec;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
+       ext4_lblk_t start, last;
 
        index = mpd->first_page;
        end   = mpd->next_page - 1;
+
+       start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       ext4_es_remove_extent(inode, start, last - start + 1);
+
+       pagevec_init(&pvec, 0);
        while (index <= end) {
                nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
                if (nr_pages == 0)
@@ -1656,15 +1660,6 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 
                for (i = 0; i < map.m_len; i++)
                        unmap_underlying_metadata(bdev, map.m_pblk + i);
-
-               if (ext4_should_order_data(mpd->inode)) {
-                       err = ext4_jbd2_file_inode(handle, mpd->inode);
-                       if (err) {
-                               /* Only if the journal is aborted */
-                               mpd->retval = err;
-                               goto submit_io;
-                       }
-               }
        }
 
        /*
@@ -1795,7 +1790,19 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
-       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+       if (ext4_has_inline_data(inode)) {
+               /*
+                * We will soon create blocks for this page, so
+                * pretend as if the blocks aren't allocated yet.
+                * In the cluster case, we have to handle the
+                * mapping from the cluster so that the reserved
+                * space is calculated properly.
+                */
+               if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
+                   ext4_find_delalloc_cluster(inode, map->m_lblk))
+                       map->m_flags |= EXT4_MAP_FROM_CLUSTER;
+               retval = 0;
+       } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                retval = ext4_ext_map_blocks(NULL, inode, map, 0);
        else
                retval = ext4_ind_map_blocks(NULL, inode, map, 0);
@@ -1814,6 +1821,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                                goto out_unlock;
                }
 
+               retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
+               if (retval)
+                       goto out_unlock;
+
                /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
                 * and it should not appear on the bh->b_state.
                 */
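
ext4_es_insert_extent() and ext4_es_remove_extent() maintain an in-memory record of which logical ranges are delayed-allocated. The real implementation keeps these in a tree; a deliberately minimal list-based model of the bookkeeping (all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* One delayed range per node; the kernel keys these by lblk. */
    struct es {
        unsigned long lblk, len;
        struct es *next;
    };

    static struct es *es_insert(struct es *head, unsigned long lblk,
                                unsigned long len)
    {
        struct es *n = malloc(sizeof(*n));

        n->lblk = lblk;
        n->len = len;
        n->next = head;
        return n;
    }

    static int es_is_delayed(const struct es *head, unsigned long lblk)
    {
        for (; head; head = head->next)
            if (lblk >= head->lblk && lblk < head->lblk + head->len)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct es *tree = NULL;

        tree = es_insert(tree, 8, 4);   /* blocks 8..11 delayed */
        printf("block 9: %d, block 20: %d\n",
               es_is_delayed(tree, 9), es_is_delayed(tree, 20));
        return 0;
    }
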
@@ -1842,8 +1853,8 @@ out_unlock:
  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
  * initialized properly.
  */
-static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
-                                 struct buffer_head *bh, int create)
+int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+                          struct buffer_head *bh, int create)
 {
        struct ext4_map_blocks map;
        int ret = 0;
@@ -1917,15 +1928,29 @@ static int __ext4_journalled_writepage(struct page *page,
 {
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
-       struct buffer_head *page_bufs;
+       struct buffer_head *page_bufs = NULL;
        handle_t *handle = NULL;
-       int ret = 0;
-       int err;
+       int ret = 0, err = 0;
+       int inline_data = ext4_has_inline_data(inode);
+       struct buffer_head *inode_bh = NULL;
 
        ClearPageChecked(page);
-       page_bufs = page_buffers(page);
-       BUG_ON(!page_bufs);
-       walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+
+       if (inline_data) {
+               BUG_ON(page->index != 0);
+               BUG_ON(len > ext4_get_max_inline_size(inode));
+               inode_bh = ext4_journalled_write_inline_data(inode, len, page);
+               if (inode_bh == NULL)
+                       goto out;
+       } else {
+               page_bufs = page_buffers(page);
+               if (!page_bufs) {
+                       BUG();
+                       goto out;
+               }
+               ext4_walk_page_buffers(handle, page_bufs, 0, len,
+                                      NULL, bget_one);
+       }
        /* As soon as we unlock the page, it can go away, but we have
         * references to buffers so we are safe */
        unlock_page(page);
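
ext4_walk_page_buffers() applies a callback to every buffer head overlapping a byte range; here bget_one/bput_one take and drop references so the page can be unlocked while the journal handle is started. A simplified, self-contained model of that walk (types and sizes are illustrative):

    #include <stdio.h>

    /* Toy buffer: a fixed-size region with a reference count. */
    struct buf {
        int refcount;
    };

    typedef int (*walk_fn)(struct buf *b);

    static int bget_one(struct buf *b) { b->refcount++; return 0; }
    static int bput_one(struct buf *b) { b->refcount--; return 0; }

    /* Apply fn to every buffer overlapping [from, to); each buffer
     * covers bufsize bytes, as the real walk does per bh. */
    static int walk_buffers(struct buf *bufs, int nbufs, int bufsize,
                            int from, int to, walk_fn fn)
    {
        int i, ret = 0, err;

        for (i = 0; i < nbufs; i++) {
            int start = i * bufsize, end = start + bufsize;

            if (end <= from || start >= to)
                continue;
            err = fn(&bufs[i]);
            if (!ret)
                ret = err;
        }
        return ret;
    }

    int main(void)
    {
        struct buf bufs[4] = { {0}, {0}, {0}, {0} };

        walk_buffers(bufs, 4, 1024, 0, 2048, bget_one);
        printf("refs: %d %d %d %d\n", bufs[0].refcount,
               bufs[1].refcount, bufs[2].refcount, bufs[3].refcount);
        walk_buffers(bufs, 4, 1024, 0, 2048, bput_one);
        return 0;
    }
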
@@ -1938,11 +1963,18 @@ static int __ext4_journalled_writepage(struct page *page,
 
        BUG_ON(!ext4_handle_valid(handle));
 
-       ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
-                               do_journal_get_write_access);
+       if (inline_data) {
+               ret = ext4_journal_get_write_access(handle, inode_bh);
+
+               err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
 
-       err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
-                               write_end_fn);
+       } else {
+               ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                            do_journal_get_write_access);
+
+               err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                                            write_end_fn);
+       }
        if (ret == 0)
                ret = err;
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
@@ -1950,9 +1982,12 @@ static int __ext4_journalled_writepage(struct page *page,
        if (!ret)
                ret = err;
 
-       walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+       if (!ext4_has_inline_data(inode))
+               ext4_walk_page_buffers(handle, page_bufs, 0, len,
+                                      NULL, bput_one);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 out:
+       brelse(inode_bh);
        return ret;
 }
 
@@ -2029,8 +2064,8 @@ static int ext4_writepage(struct page *page,
                commit_write = 1;
        }
        page_bufs = page_buffers(page);
-       if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                             ext4_bh_delay_or_unwritten)) {
+       if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+                                  ext4_bh_delay_or_unwritten)) {
                /*
                 * We don't want to do block allocation, so redirty
                 * the page and return.  We may reach here when we do
@@ -2096,7 +2131,8 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
  * mpage_da_map_and_submit to map a single contiguous memory region
  * and then write them.
  */
-static int write_cache_pages_da(struct address_space *mapping,
+static int write_cache_pages_da(handle_t *handle,
+                               struct address_space *mapping,
                                struct writeback_control *wbc,
                                struct mpage_da_data *mpd,
                                pgoff_t *done_index)
@@ -2175,6 +2211,17 @@ static int write_cache_pages_da(struct address_space *mapping,
                        wait_on_page_writeback(page);
                        BUG_ON(PageWriteback(page));
 
+                       /*
+                        * If we have inline data and arrive here, it means that
+                        * we will soon create the block for the 1st page, so
+                        * we'd better clear the inline data here.
+                        */
+                       if (ext4_has_inline_data(inode)) {
+                               BUG_ON(ext4_test_inode_state(inode,
+                                               EXT4_STATE_MAY_INLINE_DATA));
+                               ext4_destroy_inline_data(handle, inode);
+                       }
+
                        if (mpd->next_page != page->index)
                                mpd->first_page = page->index;
                        mpd->next_page = page->index + 1;
@@ -2381,7 +2428,8 @@ retry:
                 * contiguous region of logical blocks that need
                 * blocks to be allocated by ext4 and submit them.
                 */
-               ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
+               ret = write_cache_pages_da(handle, mapping,
+                                          wbc, &mpd, &done_index);
                /*
                 * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
@@ -2445,7 +2493,6 @@ out_writepages:
        return ret;
 }
 
-#define FALL_BACK_TO_NONDELALLOC 1
 static int ext4_nonda_switch(struct super_block *sb)
 {
        s64 free_blocks, dirty_blocks;
@@ -2502,6 +2549,19 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        }
        *fsdata = (void *)0;
        trace_ext4_da_write_begin(inode, pos, len, flags);
+
+       if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+               ret = ext4_da_write_inline_data_begin(mapping, inode,
+                                                     pos, len, flags,
+                                                     pagep, fsdata);
+               if (ret < 0)
+                       goto out;
+               if (ret == 1) {
+                       ret = 0;
+                       goto out;
+               }
+       }
+
 retry:
        /*
         * With delayed allocation, we don't log the i_disksize update
@@ -2603,22 +2663,13 @@ static int ext4_da_write_end(struct file *file,
         * changes.  So let's piggyback the i_disksize mark_inode_dirty
         * into that.
         */
-
        new_i_size = pos + copied;
        if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
-               if (ext4_da_should_update_i_disksize(page, end)) {
+               if (ext4_has_inline_data(inode) ||
+                   ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
-                       if (new_i_size > EXT4_I(inode)->i_disksize) {
-                               /*
-                                * Updating i_disksize when extending file
-                                * without needing block allocation
-                                */
-                               if (ext4_should_order_data(inode))
-                                       ret = ext4_jbd2_file_inode(handle,
-                                                                  inode);
-
+                       if (new_i_size > EXT4_I(inode)->i_disksize)
                                EXT4_I(inode)->i_disksize = new_i_size;
-                       }
                        up_write(&EXT4_I(inode)->i_data_sem);
                        /* We need to mark inode dirty even if
                         * new_i_size is less than inode->i_size
@@ -2627,8 +2678,16 @@ static int ext4_da_write_end(struct file *file,
                        ext4_mark_inode_dirty(handle, inode);
                }
        }
-       ret2 = generic_write_end(file, mapping, pos, len, copied,
+
+       if (write_mode != CONVERT_INLINE_DATA &&
+           ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+           ext4_has_inline_data(inode))
+               ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
+                                                    page);
+       else
+               ret2 = generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
+
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
@@ -2721,6 +2780,12 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
        journal_t *journal;
        int err;
 
+       /*
+        * We can get here for an inline file via the FIBMAP ioctl
+        */
+       if (ext4_has_inline_data(inode))
+               return 0;
+
        if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
                        test_opt(inode->i_sb, DELALLOC)) {
                /*
@@ -2766,14 +2831,30 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 
 static int ext4_readpage(struct file *file, struct page *page)
 {
+       int ret = -EAGAIN;
+       struct inode *inode = page->mapping->host;
+
        trace_ext4_readpage(page);
-       return mpage_readpage(page, ext4_get_block);
+
+       if (ext4_has_inline_data(inode))
+               ret = ext4_readpage_inline(inode, page);
+
+       if (ret == -EAGAIN)
+               return mpage_readpage(page, ext4_get_block);
+
+       return ret;
 }
 
 static int
 ext4_readpages(struct file *file, struct address_space *mapping,
                struct list_head *pages, unsigned nr_pages)
 {
+       struct inode *inode = mapping->host;
+
+       /* If the file has inline data, no need to do readpages. */
+       if (ext4_has_inline_data(inode))
+               return 0;
+
        return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
 }
 
@@ -2840,7 +2921,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * We allocate an uninitialized extent if blocks haven't been allocated.
  * The extent will be converted to initialized after the IO is complete.
  */
-static int ext4_get_block_write(struct inode *inode, sector_t iblock,
+int ext4_get_block_write(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create)
 {
        ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
@@ -2850,29 +2931,12 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
 }
 
 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
-                  struct buffer_head *bh_result, int flags)
+                  struct buffer_head *bh_result, int create)
 {
-       handle_t *handle = ext4_journal_current_handle();
-       struct ext4_map_blocks map;
-       int ret = 0;
-
-       ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
-                  inode->i_ino, flags);
-
-       flags = EXT4_GET_BLOCKS_NO_LOCK;
-
-       map.m_lblk = iblock;
-       map.m_len = bh_result->b_size >> inode->i_blkbits;
-
-       ret = ext4_map_blocks(handle, inode, &map, flags);
-       if (ret > 0) {
-               map_bh(bh_result, inode->i_sb, map.m_pblk);
-               bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
-                                       map.m_flags;
-               bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
-               ret = 0;
-       }
-       return ret;
+       ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
+                  inode->i_ino, create);
+       return _ext4_get_block(inode, iblock, bh_result,
+                              EXT4_GET_BLOCKS_NO_LOCK);
 }
 
 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
@@ -2978,10 +3042,10 @@ retry:
  * fall back to buffered IO.
  *
  * For holes, we fallocate those blocks, mark them as uninitialized
- * If those blocks were preallocated, we mark sure they are splited, but
+ * If those blocks were preallocated, we make sure they are split, but
  * still keep the range to write as uninitialized.
  *
- * The unwrritten extents will be converted to written when DIO is completed.
+ * The unwritten extents will be converted to written when DIO is completed.
  * For async direct IO, since the IO may still be pending on return, we
  * set up an end_io callback function, which will do the conversion
  * when the async direct IO is completed.
@@ -2999,125 +3063,120 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
        size_t count = iov_length(iov, nr_segs);
-
+       int overwrite = 0;
+       get_block_t *get_block_func = NULL;
+       int dio_flags = 0;
        loff_t final_size = offset + count;
-       if (rw == WRITE && final_size <= inode->i_size) {
-               int overwrite = 0;
 
-               BUG_ON(iocb->private == NULL);
+       /* Use the old path for reads and writes beyond i_size. */
+       if (rw != WRITE || final_size > inode->i_size)
+               return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
 
-               /* If we do a overwrite dio, i_mutex locking can be released */
-               overwrite = *((int *)iocb->private);
+       BUG_ON(iocb->private == NULL);
 
-               if (overwrite) {
-                       atomic_inc(&inode->i_dio_count);
-                       down_read(&EXT4_I(inode)->i_data_sem);
-                       mutex_unlock(&inode->i_mutex);
-               }
+       /* If we do an overwrite DIO, i_mutex locking can be released */
+       overwrite = *((int *)iocb->private);
 
-               /*
-                * We could direct write to holes and fallocate.
-                *
-                * Allocated blocks to fill the hole are marked as uninitialized
-                * to prevent parallel buffered read to expose the stale data
-                * before DIO complete the data IO.
-                *
-                * As to previously fallocated extents, ext4 get_block
-                * will just simply mark the buffer mapped but still
-                * keep the extents uninitialized.
-                *
-                * for non AIO case, we will convert those unwritten extents
-                * to written after return back from blockdev_direct_IO.
-                *
-                * for async DIO, the conversion needs to be defered when
-                * the IO is completed. The ext4 end_io callback function
-                * will be called to take care of the conversion work.
-                * Here for async case, we allocate an io_end structure to
-                * hook to the iocb.
-                */
-               iocb->private = NULL;
-               ext4_inode_aio_set(inode, NULL);
-               if (!is_sync_kiocb(iocb)) {
-                       ext4_io_end_t *io_end =
-                               ext4_init_io_end(inode, GFP_NOFS);
-                       if (!io_end) {
-                               ret = -ENOMEM;
-                               goto retake_lock;
-                       }
-                       io_end->flag |= EXT4_IO_END_DIRECT;
-                       iocb->private = io_end;
-                       /*
-                        * we save the io structure for current async
-                        * direct IO, so that later ext4_map_blocks()
-                        * could flag the io structure whether there
-                        * is a unwritten extents needs to be converted
-                        * when IO is completed.
-                        */
-                       ext4_inode_aio_set(inode, io_end);
-               }
+       if (overwrite) {
+               atomic_inc(&inode->i_dio_count);
+               down_read(&EXT4_I(inode)->i_data_sem);
+               mutex_unlock(&inode->i_mutex);
+       }
 
-               if (overwrite)
-                       ret = __blockdev_direct_IO(rw, iocb, inode,
-                                                inode->i_sb->s_bdev, iov,
-                                                offset, nr_segs,
-                                                ext4_get_block_write_nolock,
-                                                ext4_end_io_dio,
-                                                NULL,
-                                                0);
-               else
-                       ret = __blockdev_direct_IO(rw, iocb, inode,
-                                                inode->i_sb->s_bdev, iov,
-                                                offset, nr_segs,
-                                                ext4_get_block_write,
-                                                ext4_end_io_dio,
-                                                NULL,
-                                                DIO_LOCKING);
-               if (iocb->private)
-                       ext4_inode_aio_set(inode, NULL);
+       /*
+        * We could direct write to holes and fallocate.
+        *
+        * Allocated blocks to fill the hole are marked as
+        * uninitialized to prevent a parallel buffered read from
+        * exposing the stale data before the DIO completes.
+        *
+        * As for previously fallocated extents, ext4 get_block will
+        * simply mark the buffer mapped but still keep the extents
+        * uninitialized.
+        *
+        * For the non-AIO case, we convert those unwritten extents
+        * to written after returning from blockdev_direct_IO.
+        *
+        * For async DIO, the conversion needs to be deferred until
+        * the IO is completed. The ext4 end_io callback function
+        * will be called to take care of the conversion work. Here,
+        * for the async case, we allocate an io_end structure to
+        * hook to the iocb.
+        */
+       iocb->private = NULL;
+       ext4_inode_aio_set(inode, NULL);
+       if (!is_sync_kiocb(iocb)) {
+               ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
+               if (!io_end) {
+                       ret = -ENOMEM;
+                       goto retake_lock;
+               }
+               io_end->flag |= EXT4_IO_END_DIRECT;
+               iocb->private = io_end;
                /*
-                * The io_end structure takes a reference to the inode,
-                * that structure needs to be destroyed and the
-                * reference to the inode need to be dropped, when IO is
-                * complete, even with 0 byte write, or failed.
-                *
-                * In the successful AIO DIO case, the io_end structure will be
-                * desctroyed and the reference to the inode will be dropped
-                * after the end_io call back function is called.
-                *
-                * In the case there is 0 byte write, or error case, since
-                * VFS direct IO won't invoke the end_io call back function,
-                * we need to free the end_io structure here.
+                * We save the io structure for the current async
+                * direct IO, so that later ext4_map_blocks() can
+                * flag it when there are unwritten extents that
+                * need to be converted once the IO is completed.
                 */
-               if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
-                       ext4_free_io_end(iocb->private);
-                       iocb->private = NULL;
-               } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
-                                               EXT4_STATE_DIO_UNWRITTEN)) {
-                       int err;
-                       /*
-                        * for non AIO case, since the IO is already
-                        * completed, we could do the conversion right here
-                        */
-                       err = ext4_convert_unwritten_extents(inode,
-                                                            offset, ret);
-                       if (err < 0)
-                               ret = err;
-                       ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
-               }
+               ext4_inode_aio_set(inode, io_end);
+       }
 
-       retake_lock:
-               /* take i_mutex locking again if we do a ovewrite dio */
-               if (overwrite) {
-                       inode_dio_done(inode);
-                       up_read(&EXT4_I(inode)->i_data_sem);
-                       mutex_lock(&inode->i_mutex);
-               }
+       if (overwrite) {
+               get_block_func = ext4_get_block_write_nolock;
+       } else {
+               get_block_func = ext4_get_block_write;
+               dio_flags = DIO_LOCKING;
+       }
+       ret = __blockdev_direct_IO(rw, iocb, inode,
+                                  inode->i_sb->s_bdev, iov,
+                                  offset, nr_segs,
+                                  get_block_func,
+                                  ext4_end_io_dio,
+                                  NULL,
+                                  dio_flags);
+
+       if (iocb->private)
+               ext4_inode_aio_set(inode, NULL);
+       /*
+        * The io_end structure takes a reference to the inode; that
+        * structure needs to be destroyed and the reference to the
+        * inode needs to be dropped when the IO is complete, even
+        * for a 0 byte write or a failed one.
+        *
+        * In the successful AIO DIO case, the io_end structure will
+        * be destroyed and the reference to the inode will be
+        * dropped after the end_io callback function is called.
+        *
+        * For a 0 byte write, or in the error case, the VFS direct
+        * IO won't invoke the end_io callback function, so we need
+        * to free the io_end structure here.
+        */
+       if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+               ext4_free_io_end(iocb->private);
+               iocb->private = NULL;
+       } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
+                                               EXT4_STATE_DIO_UNWRITTEN)) {
+               int err;
+               /*
+                * For the non-AIO case, since the IO has already
+                * completed, we can do the conversion right here.
+                */
+               err = ext4_convert_unwritten_extents(inode,
+                                                    offset, ret);
+               if (err < 0)
+                       ret = err;
+               ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+       }
 
-               return ret;
+retake_lock:
+       /* take i_mutex again if we did an overwrite DIO */
+       if (overwrite) {
+               inode_dio_done(inode);
+               up_read(&EXT4_I(inode)->i_data_sem);
+               mutex_lock(&inode->i_mutex);
        }
 
-       /* for write the the end of file case, we fall back to old way */
-       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+       return ret;
 }
 
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
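
The rewritten ext4_ext_direct_IO() collapses the two near-duplicate __blockdev_direct_IO() calls into one by choosing the get_block callback and the DIO flags up front. The shape of that refactor in miniature (do_dio() and both callbacks are illustrative):

    #include <stdio.h>

    typedef int (*get_block_t)(int block);

    static int get_block_locked(int block) { return block; }
    static int get_block_nolock(int block) { return block; }

    #define DIO_LOCKING 1

    /* Single submission call parameterized by callback and flags,
     * mirroring the single __blockdev_direct_IO() call above. */
    static int do_dio(get_block_t fn, int flags, int block)
    {
        printf("flags=%d block=%d\n", flags, fn(block));
        return 0;
    }

    int main(void)
    {
        int overwrite = 1;
        get_block_t get_block_func;
        int dio_flags = 0;

        if (overwrite) {
            get_block_func = get_block_nolock;
        } else {
            get_block_func = get_block_locked;
            dio_flags = DIO_LOCKING;
        }
        return do_dio(get_block_func, dio_flags, 42);
    }
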
@@ -3134,6 +3193,10 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_should_journal_data(inode))
                return 0;
 
+       /* Let buffer I/O handle the inline data case. */
+       if (ext4_has_inline_data(inode))
+               return 0;
+
        trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
@@ -3531,6 +3594,14 @@ void ext4_truncate(struct inode *inode)
        if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
                ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
 
+       if (ext4_has_inline_data(inode)) {
+               int has_inline = 1;
+
+               ext4_inline_data_truncate(inode, &has_inline);
+               if (has_inline)
+                       return;
+       }
+
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                ext4_ext_truncate(inode);
        else
@@ -3756,6 +3827,19 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
        }
 }
 
+static inline void ext4_iget_extra_inode(struct inode *inode,
+                                        struct ext4_inode *raw_inode,
+                                        struct ext4_inode_info *ei)
+{
+       __le32 *magic = (void *)raw_inode +
+                       EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
+       if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+               ext4_find_inline_data_nolock(inode);
+       } else
+               EXT4_I(inode)->i_inline_off = 0;
+}
+
 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 {
        struct ext4_iloc iloc;
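
ext4_iget_extra_inode() probes for EXT4_XATTR_MAGIC immediately past the fixed 128-byte inode body plus i_extra_isize; when the magic is present, the in-inode xattr area (which is also where inline data is found) is valid. A userspace model of the probe on a flat byte buffer (sizes are illustrative; the cpu_to_le32() endianness handling is omitted):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define GOOD_OLD_INODE_SIZE 128
    #define XATTR_MAGIC 0xEA020000u

    int main(void)
    {
        unsigned char raw_inode[256] = {0};
        uint16_t extra_isize = 32;      /* illustrative value */
        uint32_t magic = XATTR_MAGIC;
        uint32_t probe;

        /* Place the magic where the in-inode xattr header starts. */
        memcpy(raw_inode + GOOD_OLD_INODE_SIZE + extra_isize,
               &magic, sizeof(magic));

        /* The probe: read the word just past the extra inode fields. */
        memcpy(&probe, raw_inode + GOOD_OLD_INODE_SIZE + extra_isize,
               sizeof(probe));

        printf("xattr area %s\n",
               probe == XATTR_MAGIC ? "present" : "absent");
        return 0;
    }
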
@@ -3826,6 +3910,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
 
        ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
+       ei->i_inline_off = 0;
        ei->i_dir_start_lookup = 0;
        ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
        /* We now have enough fields to check if the inode was active or not.
@@ -3898,11 +3983,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        ei->i_extra_isize = sizeof(struct ext4_inode) -
                                            EXT4_GOOD_OLD_INODE_SIZE;
                } else {
-                       __le32 *magic = (void *)raw_inode +
-                                       EXT4_GOOD_OLD_INODE_SIZE +
-                                       ei->i_extra_isize;
-                       if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
-                               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+                       ext4_iget_extra_inode(inode, raw_inode, ei);
                }
        }
 
@@ -3925,17 +4006,19 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                                 ei->i_file_acl);
                ret = -EIO;
                goto bad_inode;
-       } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
-               if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-                   (S_ISLNK(inode->i_mode) &&
-                    !ext4_inode_is_fast_symlink(inode)))
-                       /* Validate extent which is part of inode */
-                       ret = ext4_ext_check_inode(inode);
-       } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-                  (S_ISLNK(inode->i_mode) &&
-                   !ext4_inode_is_fast_symlink(inode))) {
-               /* Validate block references which are part of inode */
-               ret = ext4_ind_check_inode(inode);
+       } else if (!ext4_has_inline_data(inode)) {
+               if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+                       if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                           (S_ISLNK(inode->i_mode) &&
+                            !ext4_inode_is_fast_symlink(inode))))
+                               /* Validate extent which is part of inode */
+                               ret = ext4_ext_check_inode(inode);
+               } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                          (S_ISLNK(inode->i_mode) &&
+                           !ext4_inode_is_fast_symlink(inode))) {
+                       /* Validate block references which are part of inode */
+                       ret = ext4_ind_check_inode(inode);
+               }
        }
        if (ret)
                goto bad_inode;
@@ -4122,9 +4205,10 @@ static int ext4_do_update_inode(handle_t *handle,
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        raw_inode->i_block[2] = 0;
                }
-       } else
+       } else if (!ext4_has_inline_data(inode)) {
                for (block = 0; block < EXT4_N_BLOCKS; block++)
                        raw_inode->i_block[block] = ei->i_data[block];
+       }
 
        raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
        if (ei->i_extra_isize) {
@@ -4811,8 +4895,9 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
         * journal_start/journal_stop which can block and take a long time
         */
        if (page_has_buffers(page)) {
-               if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                       ext4_bh_unmapped)) {
+               if (!ext4_walk_page_buffers(NULL, page_buffers(page),
+                                           0, len, NULL,
+                                           ext4_bh_unmapped)) {
                        /* Wait so that we don't change page under IO */
                        wait_on_page_writeback(page);
                        ret = VM_FAULT_LOCKED;
@@ -4833,7 +4918,7 @@ retry_alloc:
        }
        ret = __block_page_mkwrite(vma, vmf, get_block);
        if (!ret && ext4_should_journal_data(inode)) {
-               if (walk_page_buffers(handle, page_buffers(page), 0,
+               if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
                          PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
                        unlock_page(page);
                        ret = VM_FAULT_SIGBUS;
index 526e553..1bf6fe7 100644 (file)
@@ -1373,7 +1373,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
        ex->fe_start += next;
 
        while (needed > ex->fe_len &&
-              (buddy = mb_find_buddy(e4b, order, &max))) {
+              mb_find_buddy(e4b, order, &max)) {
 
                if (block + 1 >= max)
                        break;
@@ -2607,9 +2607,17 @@ static void ext4_free_data_callback(struct super_block *sb,
        mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
                 entry->efd_count, entry->efd_group, entry);
 
-       if (test_opt(sb, DISCARD))
-               ext4_issue_discard(sb, entry->efd_group,
-                                  entry->efd_start_cluster, entry->efd_count);
+       if (test_opt(sb, DISCARD)) {
+               err = ext4_issue_discard(sb, entry->efd_group,
+                                        entry->efd_start_cluster,
+                                        entry->efd_count);
+               if (err && err != -EOPNOTSUPP)
+                       ext4_msg(sb, KERN_WARNING, "discard request in"
+                                " group:%d block:%d count:%d failed"
+                                " with %d", entry->efd_group,
+                                entry->efd_start_cluster,
+                                entry->efd_count, err);
+       }
 
        err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
        /* we expect to find existing buddy because it's pinned */
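
The discard handling above introduces a pattern that recurs later in this patch: warn on any ext4_issue_discard() failure except -EOPNOTSUPP, so that devices without TRIM support stay quiet. A standalone sketch (issue_discard() is a stand-in):

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for ext4_issue_discard(): 0 on success, negative
     * errno on failure; -EOPNOTSUPP means no discard support. */
    static int issue_discard(int rc)
    {
        return rc;
    }

    static void free_with_discard(int rc)
    {
        int err = issue_discard(rc);

        /* Devices without TRIM are not an error worth logging. */
        if (err && err != -EOPNOTSUPP)
            fprintf(stderr, "discard request failed with %d\n", err);
    }

    int main(void)
    {
        free_with_discard(0);             /* success: silent */
        free_with_discard(-EOPNOTSUPP);   /* unsupported: silent */
        free_with_discard(-EIO);          /* real failure: warn */
        return 0;
    }
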
@@ -4310,8 +4318,10 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 repeat:
                /* allocate space in core */
                *errp = ext4_mb_regular_allocator(ac);
-               if (*errp)
+               if (*errp) {
+                       ext4_discard_allocated_blocks(ac);
                        goto errout;
+               }
 
                /* as we've just preallocated more space than
                 * user requested orinally, we store allocated
@@ -4333,10 +4343,10 @@ repeat:
                        ac->ac_b_ex.fe_len = 0;
                        ac->ac_status = AC_STATUS_CONTINUE;
                        goto repeat;
-               } else if (*errp)
-               errout:
+               } else if (*errp) {
                        ext4_discard_allocated_blocks(ac);
-               else {
+                       goto errout;
+               } else {
                        block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
                        ar->len = ac->ac_b_ex.fe_len;
                }
@@ -4347,6 +4357,7 @@ repeat:
                *errp = -ENOSPC;
        }
 
+errout:
        if (*errp) {
                ac->ac_b_ex.fe_len = 0;
                ar->len = 0;
@@ -4656,8 +4667,16 @@ do_more:
                 * with group lock held. generate_buddy look at
                 * them with group lock_held
                 */
-               if (test_opt(sb, DISCARD))
-                       ext4_issue_discard(sb, block_group, bit, count);
+               if (test_opt(sb, DISCARD)) {
+                       err = ext4_issue_discard(sb, block_group, bit, count);
+                       if (err && err != -EOPNOTSUPP)
+                               ext4_msg(sb, KERN_WARNING, "discard request in"
+                                        " group:%d block:%d count:%lu failed"
+                                        " with %d", block_group, bit, count,
+                                        err);
+               }
+
                ext4_lock_group(sb, block_group);
                mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
                mb_free_blocks(inode, &e4b, bit, count_clusters);
@@ -4851,10 +4870,11 @@ error_return:
  * one will allocate those blocks, mark them as used in the buddy bitmap.
  * This must be called under the group lock.
  */
-static void ext4_trim_extent(struct super_block *sb, int start, int count,
+static int ext4_trim_extent(struct super_block *sb, int start, int count,
                             ext4_group_t group, struct ext4_buddy *e4b)
 {
        struct ext4_free_extent ex;
+       int ret = 0;
 
        trace_ext4_trim_extent(sb, group, start, count);
 
@@ -4870,9 +4890,10 @@ static void ext4_trim_extent(struct super_block *sb, int start, int count,
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
-       ext4_issue_discard(sb, group, start, count);
+       ret = ext4_issue_discard(sb, group, start, count);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
+       return ret;
 }
 
 /**
@@ -4901,7 +4922,7 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
        void *bitmap;
        ext4_grpblk_t next, count = 0, free_count = 0;
        struct ext4_buddy e4b;
-       int ret;
+       int ret = 0;
 
        trace_ext4_trim_all_free(sb, group, start, max);
 
@@ -4928,8 +4949,11 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                next = mb_find_next_bit(bitmap, max + 1, start);
 
                if ((next - start) >= minblocks) {
-                       ext4_trim_extent(sb, start,
-                                        next - start, group, &e4b);
+                       ret = ext4_trim_extent(sb, start,
+                                              next - start, group, &e4b);
+                       if (ret && ret != -EOPNOTSUPP)
+                               break;
+                       ret = 0;
                        count += next - start;
                }
                free_count += next - start;
@@ -4950,8 +4974,10 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
                        break;
        }
 
-       if (!ret)
+       if (!ret) {
+               ret = count;
                EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
+       }
 out:
        ext4_unlock_group(sb, group);
        ext4_mb_unload_buddy(&e4b);
@@ -4959,7 +4985,7 @@ out:
        ext4_debug("trimmed %d blocks in the group %d\n",
                count, group);
 
-       return count;
+       return ret;
 }
 
 /**
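
With this change ext4_trim_all_free() folds the trimmed-block count into its return value: a negative errno on failure, otherwise the count (ret = count is set only on the success path). The convention modeled in isolation (trim_group() is illustrative):

    #include <stdio.h>

    /* Returns the number of blocks trimmed, or a negative errno. */
    static int trim_group(int blocks, int fail)
    {
        int ret = 0, count = 0;

        if (fail)
            ret = -5;       /* e.g. -EIO */
        else
            count = blocks;

        if (!ret)
            ret = count;    /* success: report the count */
        return ret;
    }

    int main(void)
    {
        printf("ok: %d, error: %d\n",
               trim_group(128, 0), trim_group(128, 1));
        return 0;
    }
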
index f1bb32e..db8226d 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/slab.h>
 #include "ext4_jbd2.h"
+#include "ext4_extents.h"
 
 /*
  * The contiguous blocks details which can be
index 292daee..d9cc5ee 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
+#include "ext4_extents.h"
 
 /**
  * get_ext_path - Find an extent path for designated logical block number.
index 6d600a6..cac4482 100644 (file)
@@ -202,13 +202,8 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
                             struct inode *inode);
 
 /* checksumming functions */
-#define EXT4_DIRENT_TAIL(block, blocksize) \
-       ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
-                                       ((blocksize) - \
-                                        sizeof(struct ext4_dir_entry_tail))))
-
-static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
-                                  unsigned int blocksize)
+void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
+                           unsigned int blocksize)
 {
        memset(t, 0, sizeof(struct ext4_dir_entry_tail));
        t->det_rec_len = ext4_rec_len_to_disk(
@@ -261,6 +256,12 @@ static __le32 ext4_dirent_csum(struct inode *inode,
        return cpu_to_le32(csum);
 }
 
+static void warn_no_space_for_csum(struct inode *inode)
+{
+       ext4_warning(inode->i_sb, "no space in directory inode %lu leaf for "
+                    "checksum.  Please run e2fsck -D.", inode->i_ino);
+}
+
 int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
 {
        struct ext4_dir_entry_tail *t;
@@ -271,8 +272,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
 
        t = get_dirent_tail(inode, dirent);
        if (!t) {
-               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
-                                "leaf for checksum.  Please run e2fsck -D.");
+               warn_no_space_for_csum(inode);
                return 0;
        }
 
@@ -294,8 +294,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
 
        t = get_dirent_tail(inode, dirent);
        if (!t) {
-               EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
-                                "leaf for checksum.  Please run e2fsck -D.");
+               warn_no_space_for_csum(inode);
                return;
        }
 
@@ -303,9 +302,9 @@ static void ext4_dirent_csum_set(struct inode *inode,
                                           (void *)t - (void *)dirent);
 }
 
-static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
-                                               struct inode *inode,
-                                               struct buffer_head *bh)
+int ext4_handle_dirty_dirent_node(handle_t *handle,
+                                 struct inode *inode,
+                                 struct buffer_head *bh)
 {
        ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
        return ext4_handle_dirty_metadata(handle, inode, bh);
@@ -377,8 +376,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
        count = le16_to_cpu(c->count);
        if (count_offset + (limit * sizeof(struct dx_entry)) >
            EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
-               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
-                                "tree checksum found.  Run e2fsck -D.");
+               warn_no_space_for_csum(inode);
                return 1;
        }
        t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
@@ -408,8 +406,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
        count = le16_to_cpu(c->count);
        if (count_offset + (limit * sizeof(struct dx_entry)) >
            EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
-               EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
-                                "tree checksum.  Run e2fsck -D.");
+               warn_no_space_for_csum(inode);
                return;
        }
        t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
@@ -890,6 +887,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
                                           EXT4_DIR_REC_LEN(0));
        for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
                if (ext4_check_dir_entry(dir, NULL, de, bh,
+                               bh->b_data, bh->b_size,
                                (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
                                         + ((char *)de - bh->b_data))) {
                        /* On error, skip the f_pos to the next block. */
@@ -1007,6 +1005,15 @@ errout:
        return (err);
 }
 
+static inline int search_dirblock(struct buffer_head *bh,
+                                 struct inode *dir,
+                                 const struct qstr *d_name,
+                                 unsigned int offset,
+                                 struct ext4_dir_entry_2 **res_dir)
+{
+       return search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
+                         d_name, offset, res_dir);
+}
 
 /*
  * Directory block splitting, compacting
@@ -1081,13 +1088,6 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
        dx_set_count(entries, count + 1);
 }
 
-static void ext4_update_dx_flag(struct inode *inode)
-{
-       if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
-                                    EXT4_FEATURE_COMPAT_DIR_INDEX))
-               ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
-}
-
 /*
  * NOTE! unlike strncmp, ext4_match returns 1 for success, 0 for failure.
  *
@@ -1107,11 +1107,13 @@ static inline int ext4_match (int len, const char * const name,
 /*
  * Returns 0 if not found, -1 on failure, and 1 on success
  */
-static inline int search_dirblock(struct buffer_head *bh,
-                                 struct inode *dir,
-                                 const struct qstr *d_name,
-                                 unsigned int offset,
-                                 struct ext4_dir_entry_2 ** res_dir)
+int search_dir(struct buffer_head *bh,
+              char *search_buf,
+              int buf_size,
+              struct inode *dir,
+              const struct qstr *d_name,
+              unsigned int offset,
+              struct ext4_dir_entry_2 **res_dir)
 {
        struct ext4_dir_entry_2 * de;
        char * dlimit;
@@ -1119,8 +1121,8 @@ static inline int search_dirblock(struct buffer_head *bh,
        const char *name = d_name->name;
        int namelen = d_name->len;
 
-       de = (struct ext4_dir_entry_2 *) bh->b_data;
-       dlimit = bh->b_data + dir->i_sb->s_blocksize;
+       de = (struct ext4_dir_entry_2 *)search_buf;
+       dlimit = search_buf + buf_size;
        while ((char *) de < dlimit) {
                /* this code is executed quadratically often */
                /* do minimal checking `by hand' */
@@ -1128,7 +1130,8 @@ static inline int search_dirblock(struct buffer_head *bh,
                if ((char *) de + namelen <= dlimit &&
                    ext4_match (namelen, name, de)) {
                        /* found a match - just to be sure, do a full check */
-                       if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
+                       if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
+                                                bh->b_size, offset))
                                return -1;
                        *res_dir = de;
                        return 1;
@@ -1144,6 +1147,21 @@ static inline int search_dirblock(struct buffer_head *bh,
        return 0;
 }
 
+static int is_dx_internal_node(struct inode *dir, ext4_lblk_t block,
+                              struct ext4_dir_entry *de)
+{
+       struct super_block *sb = dir->i_sb;
+
+       if (!is_dx(dir))
+               return 0;
+       if (block == 0)
+               return 1;
+       if (de->inode == 0 &&
+           ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) ==
+                       sb->s_blocksize)
+               return 1;
+       return 0;
+}
 
 /*
  *     ext4_find_entry()
@@ -1158,7 +1176,8 @@ static inline int search_dirblock(struct buffer_head *bh,
  */
 static struct buffer_head * ext4_find_entry (struct inode *dir,
                                        const struct qstr *d_name,
-                                       struct ext4_dir_entry_2 ** res_dir)
+                                       struct ext4_dir_entry_2 **res_dir,
+                                       int *inlined)
 {
        struct super_block *sb;
        struct buffer_head *bh_use[NAMEI_RA_SIZE];
@@ -1179,6 +1198,18 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
        namelen = d_name->len;
        if (namelen > EXT4_NAME_LEN)
                return NULL;
+
+       if (ext4_has_inline_data(dir)) {
+               int has_inline_data = 1;
+               ret = ext4_find_inline_entry(dir, d_name, res_dir,
+                                            &has_inline_data);
+               if (has_inline_data) {
+                       if (inlined)
+                               *inlined = 1;
+                       return ret;
+               }
+       }
+
        if ((namelen <= 2) && (name[0] == '.') &&
            (name[1] == '.' || name[1] == '\0')) {
                /*
@@ -1244,6 +1275,8 @@ restart:
                        goto next;
                }
                if (!buffer_verified(bh) &&
+                   !is_dx_internal_node(dir, block,
+                                        (struct ext4_dir_entry *)bh->b_data) &&
                    !ext4_dirent_csum_verify(dir,
                                (struct ext4_dir_entry *)bh->b_data)) {
                        EXT4_ERROR_INODE(dir, "checksumming directory "
@@ -1361,7 +1394,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
        if (dentry->d_name.len > EXT4_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       bh = ext4_find_entry(dir, &dentry->d_name, &de);
+       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        inode = NULL;
        if (bh) {
                __u32 ino = le32_to_cpu(de->inode);
@@ -1395,7 +1428,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
        struct ext4_dir_entry_2 * de;
        struct buffer_head *bh;
 
-       bh = ext4_find_entry(child->d_inode, &dotdot, &de);
+       bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
        if (!bh)
                return ERR_PTR(-ENOENT);
        ino = le32_to_cpu(de->inode);
@@ -1593,6 +1626,63 @@ errout:
        return NULL;
 }
 
+int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+                     struct buffer_head *bh,
+                     void *buf, int buf_size,
+                     const char *name, int namelen,
+                     struct ext4_dir_entry_2 **dest_de)
+{
+       struct ext4_dir_entry_2 *de;
+       unsigned short reclen = EXT4_DIR_REC_LEN(namelen);
+       int nlen, rlen;
+       unsigned int offset = 0;
+       char *top;
+
+       de = (struct ext4_dir_entry_2 *)buf;
+       top = buf + buf_size - reclen;
+       while ((char *) de <= top) {
+               if (ext4_check_dir_entry(dir, NULL, de, bh,
+                                        buf, buf_size, offset))
+                       return -EIO;
+               if (ext4_match(namelen, name, de))
+                       return -EEXIST;
+               nlen = EXT4_DIR_REC_LEN(de->name_len);
+               rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+               if ((de->inode ? rlen - nlen : rlen) >= reclen)
+                       break;
+               de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+               offset += rlen;
+       }
+       if ((char *) de > top)
+               return -ENOSPC;
+
+       *dest_de = de;
+       return 0;
+}
+
+void ext4_insert_dentry(struct inode *inode,
+                       struct ext4_dir_entry_2 *de,
+                       int buf_size,
+                       const char *name, int namelen)
+{
+
+       int nlen, rlen;
+
+       nlen = EXT4_DIR_REC_LEN(de->name_len);
+       rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+       if (de->inode) {
+               struct ext4_dir_entry_2 *de1 =
+                               (struct ext4_dir_entry_2 *)((char *)de + nlen);
+               de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, buf_size);
+               de->rec_len = ext4_rec_len_to_disk(nlen, buf_size);
+               de = de1;
+       }
+       de->file_type = EXT4_FT_UNKNOWN;
+       de->inode = cpu_to_le32(inode->i_ino);
+       ext4_set_de_type(inode->i_sb, de, inode->i_mode);
+       de->name_len = namelen;
+       memcpy(de->name, name, namelen);
+}
 /*
  * Add a new entry into a directory (leaf) block.  If de is non-NULL,
  * it points to a directory entry which is guaranteed to be large
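
ext4_insert_dentry() keeps the classic directory-entry split: when the slot found by ext4_find_dest_de() holds a live entry, the live entry's rec_len is shrunk to what it actually needs (nlen) and the new entry inherits the remainder (rlen - nlen). The arithmetic worked in miniature, keeping only EXT4_DIR_REC_LEN()'s 8-byte header and 4-byte rounding:

    #include <stdio.h>

    /* Space a dirent with an n-byte name needs: 8-byte header plus
     * the name, rounded up to 4 bytes, as EXT4_DIR_REC_LEN() does. */
    static int rec_len(int name_len)
    {
        return (8 + name_len + 3) & ~3;
    }

    int main(void)
    {
        int rlen = 40;                  /* on-disk rec_len of the slot */
        int name_len = 5;               /* live entry's name length */
        int nlen = rec_len(name_len);   /* space it actually needs */

        /* If the slot is in use, split it: the old entry keeps nlen
         * bytes and the new entry inherits the remainder. */
        printf("old entry keeps %d bytes, new entry gets %d bytes\n",
               nlen, rlen - nlen);
        return 0;
    }
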
@@ -1608,12 +1698,10 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        struct inode    *dir = dentry->d_parent->d_inode;
        const char      *name = dentry->d_name.name;
        int             namelen = dentry->d_name.len;
-       unsigned int    offset = 0;
        unsigned int    blocksize = dir->i_sb->s_blocksize;
        unsigned short  reclen;
-       int             nlen, rlen, err;
-       char            *top;
        int             csum_size = 0;
+       int             err;
 
        if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
@@ -1621,22 +1709,11 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
 
        reclen = EXT4_DIR_REC_LEN(namelen);
        if (!de) {
-               de = (struct ext4_dir_entry_2 *)bh->b_data;
-               top = bh->b_data + (blocksize - csum_size) - reclen;
-               while ((char *) de <= top) {
-                       if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
-                               return -EIO;
-                       if (ext4_match(namelen, name, de))
-                               return -EEXIST;
-                       nlen = EXT4_DIR_REC_LEN(de->name_len);
-                       rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
-                       if ((de->inode? rlen - nlen: rlen) >= reclen)
-                               break;
-                       de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
-                       offset += rlen;
-               }
-               if ((char *) de > top)
-                       return -ENOSPC;
+               err = ext4_find_dest_de(dir, inode,
+                                       bh, bh->b_data, blocksize - csum_size,
+                                       name, namelen, &de);
+               if (err)
+                       return err;
        }
        BUFFER_TRACE(bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, bh);
@@ -1646,19 +1723,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
        }
 
        /* By now the buffer is marked for journaling */
-       nlen = EXT4_DIR_REC_LEN(de->name_len);
-       rlen = ext4_rec_len_from_disk(de->rec_len, blocksize);
-       if (de->inode) {
-               struct ext4_dir_entry_2 *de1 = (struct ext4_dir_entry_2 *)((char *)de + nlen);
-               de1->rec_len = ext4_rec_len_to_disk(rlen - nlen, blocksize);
-               de->rec_len = ext4_rec_len_to_disk(nlen, blocksize);
-               de = de1;
-       }
-       de->file_type = EXT4_FT_UNKNOWN;
-       de->inode = cpu_to_le32(inode->i_ino);
-       ext4_set_de_type(dir->i_sb, de, inode->i_mode);
-       de->name_len = namelen;
-       memcpy(de->name, name, namelen);
+       ext4_insert_dentry(inode, de, blocksize, name, namelen);
+
        /*
         * XXX shouldn't update any times until successful
         * completion of syscall, but too many callers depend
@@ -1831,6 +1897,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
        blocksize = sb->s_blocksize;
        if (!dentry->d_name.len)
                return -EINVAL;
+
+       if (ext4_has_inline_data(dir)) {
+               retval = ext4_try_add_inline_entry(handle, dentry, inode);
+               if (retval < 0)
+                       return retval;
+               if (retval == 1) {
+                       retval = 0;
+                       return retval;
+               }
+       }
+
        if (is_dx(dir)) {
                retval = ext4_dx_add_entry(handle, dentry, inode);
                if (!retval || (retval != ERR_BAD_DX_DIR))
@@ -2036,36 +2113,29 @@ cleanup:
 }
 
 /*
- * ext4_delete_entry deletes a directory entry by merging it with the
- * previous entry
+ * ext4_generic_delete_entry deletes a directory entry by merging it
+ * with the previous entry
  */
-static int ext4_delete_entry(handle_t *handle,
-                            struct inode *dir,
-                            struct ext4_dir_entry_2 *de_del,
-                            struct buffer_head *bh)
+int ext4_generic_delete_entry(handle_t *handle,
+                             struct inode *dir,
+                             struct ext4_dir_entry_2 *de_del,
+                             struct buffer_head *bh,
+                             void *entry_buf,
+                             int buf_size,
+                             int csum_size)
 {
        struct ext4_dir_entry_2 *de, *pde;
        unsigned int blocksize = dir->i_sb->s_blocksize;
-       int csum_size = 0;
-       int i, err;
-
-       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
-                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
-               csum_size = sizeof(struct ext4_dir_entry_tail);
+       int i;
 
        i = 0;
        pde = NULL;
-       de = (struct ext4_dir_entry_2 *) bh->b_data;
-       while (i < bh->b_size - csum_size) {
-               if (ext4_check_dir_entry(dir, NULL, de, bh, i))
+       de = (struct ext4_dir_entry_2 *)entry_buf;
+       while (i < buf_size - csum_size) {
+               if (ext4_check_dir_entry(dir, NULL, de, bh,
+                                        bh->b_data, bh->b_size, i))
                        return -EIO;
                if (de == de_del)  {
-                       BUFFER_TRACE(bh, "get_write_access");
-                       err = ext4_journal_get_write_access(handle, bh);
-                       if (unlikely(err)) {
-                               ext4_std_error(dir->i_sb, err);
-                               return err;
-                       }
                        if (pde)
                                pde->rec_len = ext4_rec_len_to_disk(
                                        ext4_rec_len_from_disk(pde->rec_len,
@@ -2076,12 +2146,6 @@ static int ext4_delete_entry(handle_t *handle,
                        else
                                de->inode = 0;
                        dir->i_version++;
-                       BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-                       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
-                       if (unlikely(err)) {
-                               ext4_std_error(dir->i_sb, err);
-                               return err;
-                       }
                        return 0;
                }
                i += ext4_rec_len_from_disk(de->rec_len, blocksize);
@@ -2091,6 +2155,48 @@ static int ext4_delete_entry(handle_t *handle,
        return -ENOENT;
 }
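
An illustrative aside, not part of the patch: ext4_generic_delete_entry never compacts the block; it either widens the previous record so the victim's bytes become dead space inside it, or, for the very first entry, just clears the inode number. A hedged sketch of the merge, reusing the simplified layout and hypothetical demo names from the earlier aside:

#include <stdint.h>
#include <stdio.h>

struct demo_dirent {
	uint32_t inode;		/* 0 means "unused entry" */
	uint16_t rec_len;	/* bytes from here to the next entry */
	uint8_t  name_len;
	uint8_t  file_type;
};

int main(void)
{
	char block[64] = {0};
	struct demo_dirent *prev = (struct demo_dirent *)block;
	struct demo_dirent *victim = (struct demo_dirent *)(block + 12);

	prev->inode = 12;
	prev->rec_len = 12;
	victim->inode = 13;
	victim->rec_len = 52;	/* rest of the block */

	/* delete: prev's record grows to swallow the victim's bytes;
	 * the victim's header is now dead space inside prev's record */
	prev->rec_len += victim->rec_len;

	/* had the victim been the first entry (no prev), we would
	 * instead clear it: victim->inode = 0; */
	printf("prev: rec_len=%u (spans the whole 64-byte block)\n",
	       prev->rec_len);
	return 0;
}
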
 
+static int ext4_delete_entry(handle_t *handle,
+                            struct inode *dir,
+                            struct ext4_dir_entry_2 *de_del,
+                            struct buffer_head *bh)
+{
+       int err, csum_size = 0;
+
+       if (ext4_has_inline_data(dir)) {
+               int has_inline_data = 1;
+               err = ext4_delete_inline_entry(handle, dir, de_del, bh,
+                                              &has_inline_data);
+               if (has_inline_data)
+                       return err;
+       }
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
+                                      EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
+               csum_size = sizeof(struct ext4_dir_entry_tail);
+
+       BUFFER_TRACE(bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, bh);
+       if (unlikely(err))
+               goto out;
+
+       err = ext4_generic_delete_entry(handle, dir, de_del,
+                                       bh, bh->b_data,
+                                       dir->i_sb->s_blocksize, csum_size);
+       if (err)
+               goto out;
+
+       BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+       err = ext4_handle_dirty_dirent_node(handle, dir, bh);
+       if (unlikely(err))
+               goto out;
+
+       return 0;
+out:
+       if (err != -ENOENT)
+               ext4_std_error(dir->i_sb, err);
+       return err;
+}
+
 /*
  * DIR_NLINK feature is set if 1) nlinks > EXT4_LINK_MAX or 2) nlinks == 2,
  * since this indicates that nlinks count was previously 1.
@@ -2211,21 +2317,95 @@ retry:
        return err;
 }
 
-static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
+                         struct ext4_dir_entry_2 *de,
+                         int blocksize, int csum_size,
+                         unsigned int parent_ino, int dotdot_real_len)
+{
+       de->inode = cpu_to_le32(inode->i_ino);
+       de->name_len = 1;
+       de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+                                          blocksize);
+       strcpy(de->name, ".");
+       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
+       de = ext4_next_entry(de, blocksize);
+       de->inode = cpu_to_le32(parent_ino);
+       de->name_len = 2;
+       if (!dotdot_real_len)
+               de->rec_len = ext4_rec_len_to_disk(blocksize -
+                                       (csum_size + EXT4_DIR_REC_LEN(1)),
+                                       blocksize);
+       else
+               de->rec_len = ext4_rec_len_to_disk(
+                               EXT4_DIR_REC_LEN(de->name_len), blocksize);
+       strcpy(de->name, "..");
+       ext4_set_de_type(inode->i_sb, de, S_IFDIR);
+
+       return ext4_next_entry(de, blocksize);
+}
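
An illustrative aside, not part of the patch: when dotdot_real_len is zero, the ".." record is stretched so that "." and ".." together own the whole block. A quick check of the arithmetic for a hypothetical 4 KiB block with no checksum tail (csum_size == 0):

#include <stdio.h>

/* 4-byte-aligned record length for a name (EXT4_DIR_REC_LEN analogue) */
static unsigned rec_len(unsigned name_len)
{
	return (8 + name_len + 3) & ~3u;
}

int main(void)
{
	unsigned blocksize = 4096, csum_size = 0;
	unsigned dot = rec_len(1);	/* "." -> 12 bytes */
	unsigned dotdot = blocksize - (csum_size + rec_len(1));

	printf(".  rec_len=%u\n", dot);
	printf(".. rec_len=%u, record ends at byte %u\n",
	       dotdot, dot + dotdot);
	return 0;
}
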
+
+static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
+                            struct inode *inode)
 {
-       handle_t *handle;
-       struct inode *inode;
        struct buffer_head *dir_block = NULL;
        struct ext4_dir_entry_2 *de;
        struct ext4_dir_entry_tail *t;
        unsigned int blocksize = dir->i_sb->s_blocksize;
        int csum_size = 0;
-       int err, retries = 0;
+       int err;
 
        if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
                                       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
                csum_size = sizeof(struct ext4_dir_entry_tail);
 
+       if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+               err = ext4_try_create_inline_dir(handle, dir, inode);
+               if (err < 0 && err != -ENOSPC)
+                       goto out;
+               if (!err)
+                       goto out;
+       }
+
+       inode->i_size = EXT4_I(inode)->i_disksize = blocksize;
+       if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
+               if (!err) {
+                       err = -EIO;
+                       ext4_error(inode->i_sb,
+                                  "Directory hole detected on inode %lu\n",
+                                  inode->i_ino);
+               }
+               goto out;
+       }
+       BUFFER_TRACE(dir_block, "get_write_access");
+       err = ext4_journal_get_write_access(handle, dir_block);
+       if (err)
+               goto out;
+       de = (struct ext4_dir_entry_2 *)dir_block->b_data;
+       ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
+       set_nlink(inode, 2);
+       if (csum_size) {
+               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
+               initialize_dirent_tail(t, blocksize);
+       }
+
+       BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
+       err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+       if (err)
+               goto out;
+       set_buffer_verified(dir_block);
+out:
+       brelse(dir_block);
+       return err;
+}
+
+static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       handle_t *handle;
+       struct inode *inode;
+       int err, retries = 0;
+
        if (EXT4_DIR_LINK_MAX(dir))
                return -EMLINK;
 
@@ -2249,47 +2429,9 @@ retry:
 
        inode->i_op = &ext4_dir_inode_operations;
        inode->i_fop = &ext4_dir_operations;
-       inode->i_size = EXT4_I(inode)->i_disksize = inode->i_sb->s_blocksize;
-       if (!(dir_block = ext4_bread(handle, inode, 0, 1, &err))) {
-               if (!err) {
-                       err = -EIO;
-                       ext4_error(inode->i_sb,
-                                  "Directory hole detected on inode %lu\n",
-                                  inode->i_ino);
-               }
-               goto out_clear_inode;
-       }
-       BUFFER_TRACE(dir_block, "get_write_access");
-       err = ext4_journal_get_write_access(handle, dir_block);
-       if (err)
-               goto out_clear_inode;
-       de = (struct ext4_dir_entry_2 *) dir_block->b_data;
-       de->inode = cpu_to_le32(inode->i_ino);
-       de->name_len = 1;
-       de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
-                                          blocksize);
-       strcpy(de->name, ".");
-       ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-       de = ext4_next_entry(de, blocksize);
-       de->inode = cpu_to_le32(dir->i_ino);
-       de->rec_len = ext4_rec_len_to_disk(blocksize -
-                                          (csum_size + EXT4_DIR_REC_LEN(1)),
-                                          blocksize);
-       de->name_len = 2;
-       strcpy(de->name, "..");
-       ext4_set_de_type(dir->i_sb, de, S_IFDIR);
-       set_nlink(inode, 2);
-
-       if (csum_size) {
-               t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
-               initialize_dirent_tail(t, blocksize);
-       }
-
-       BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-       err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
+       err = ext4_init_new_dir(handle, dir, inode);
        if (err)
                goto out_clear_inode;
-       set_buffer_verified(dir_block);
        err = ext4_mark_inode_dirty(handle, inode);
        if (!err)
                err = ext4_add_entry(handle, dentry, inode);
@@ -2309,7 +2451,6 @@ out_clear_inode:
        unlock_new_inode(inode);
        d_instantiate(dentry, inode);
 out_stop:
-       brelse(dir_block);
        ext4_journal_stop(handle);
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
@@ -2327,6 +2468,14 @@ static int empty_dir(struct inode *inode)
        struct super_block *sb;
        int err = 0;
 
+       if (ext4_has_inline_data(inode)) {
+               int has_inline_data = 1;
+
+               err = empty_inline_dir(inode, &has_inline_data);
+               if (has_inline_data)
+                       return err;
+       }
+
        sb = inode->i_sb;
        if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
            !(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
@@ -2393,7 +2542,8 @@ static int empty_dir(struct inode *inode)
                        set_buffer_verified(bh);
                        de = (struct ext4_dir_entry_2 *) bh->b_data;
                }
-               if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
+               if (ext4_check_dir_entry(inode, NULL, de, bh,
+                                        bh->b_data, bh->b_size, offset)) {
                        de = (struct ext4_dir_entry_2 *)(bh->b_data +
                                                         sb->s_blocksize);
                        offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2579,7 +2729,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
                return PTR_ERR(handle);
 
        retval = -ENOENT;
-       bh = ext4_find_entry(dir, &dentry->d_name, &de);
+       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        if (!bh)
                goto end_rmdir;
 
@@ -2644,7 +2794,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
                ext4_handle_sync(handle);
 
        retval = -ENOENT;
-       bh = ext4_find_entry(dir, &dentry->d_name, &de);
+       bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        if (!bh)
                goto end_unlink;
 
@@ -2826,8 +2976,39 @@ retry:
        return err;
 }
 
-#define PARENT_INO(buffer, size) \
-       (ext4_next_entry((struct ext4_dir_entry_2 *)(buffer), size)->inode)
+
+/*
+ * Try to find the buffer head that contains the parent block: the
+ * inode block if the directory is inlined, or the first block if it
+ * is a regular directory.
+ */
+static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+                                       struct inode *inode,
+                                       int *retval,
+                                       struct ext4_dir_entry_2 **parent_de,
+                                       int *inlined)
+{
+       struct buffer_head *bh;
+
+       if (!ext4_has_inline_data(inode)) {
+               if (!(bh = ext4_bread(handle, inode, 0, 0, retval))) {
+                       if (!*retval) {
+                               *retval = -EIO;
+                               ext4_error(inode->i_sb,
+                                          "Directory hole detected on inode %lu\n",
+                                          inode->i_ino);
+                       }
+                       return NULL;
+               }
+               *parent_de = ext4_next_entry(
+                                       (struct ext4_dir_entry_2 *)bh->b_data,
+                                       inode->i_sb->s_blocksize);
+               return bh;
+       }
+
+       *inlined = 1;
+       return ext4_get_first_inline_block(inode, parent_de, retval);
+}
 
 /*
  * Anybody can rename anything with this: the permission checks are left to the
@@ -2841,6 +3022,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct buffer_head *old_bh, *new_bh, *dir_bh;
        struct ext4_dir_entry_2 *old_de, *new_de;
        int retval, force_da_alloc = 0;
+       int inlined = 0, new_inlined = 0;
+       struct ext4_dir_entry_2 *parent_de;
 
        dquot_initialize(old_dir);
        dquot_initialize(new_dir);
@@ -2860,7 +3043,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
                ext4_handle_sync(handle);
 
-       old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de);
+       old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
        /*
         *  Check for inode number is _not_ due to possible IO errors.
         *  We might rmdir the source, keep it as pwd of some process
@@ -2873,7 +3056,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                goto end_rename;
 
        new_inode = new_dentry->d_inode;
-       new_bh = ext4_find_entry(new_dir, &new_dentry->d_name, &new_de);
+       new_bh = ext4_find_entry(new_dir, &new_dentry->d_name,
+                                &new_de, &new_inlined);
        if (new_bh) {
                if (!new_inode) {
                        brelse(new_bh);
@@ -2887,22 +3071,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                goto end_rename;
                }
                retval = -EIO;
-               if (!(dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval))) {
-                       if (!retval) {
-                               retval = -EIO;
-                               ext4_error(old_inode->i_sb,
-                                          "Directory hole detected on inode %lu\n",
-                                          old_inode->i_ino);
-                       }
+               dir_bh = ext4_get_first_dir_block(handle, old_inode,
+                                                 &retval, &parent_de,
+                                                 &inlined);
+               if (!dir_bh)
                        goto end_rename;
-               }
-               if (!buffer_verified(dir_bh) &&
+               if (!inlined && !buffer_verified(dir_bh) &&
                    !ext4_dirent_csum_verify(old_inode,
                                (struct ext4_dir_entry *)dir_bh->b_data))
                        goto end_rename;
                set_buffer_verified(dir_bh);
-               if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
-                               old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
+               if (le32_to_cpu(parent_de->inode) != old_dir->i_ino)
                        goto end_rename;
                retval = -EMLINK;
                if (!new_inode && new_dir != old_dir &&
@@ -2931,10 +3110,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                                        ext4_current_time(new_dir);
                ext4_mark_inode_dirty(handle, new_dir);
                BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-               retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
-               if (unlikely(retval)) {
-                       ext4_std_error(new_dir->i_sb, retval);
-                       goto end_rename;
+               if (!new_inlined) {
+                       retval = ext4_handle_dirty_dirent_node(handle,
+                                                              new_dir, new_bh);
+                       if (unlikely(retval)) {
+                               ext4_std_error(new_dir->i_sb, retval);
+                               goto end_rename;
+                       }
                }
                brelse(new_bh);
                new_bh = NULL;
@@ -2962,7 +3144,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct buffer_head *old_bh2;
                struct ext4_dir_entry_2 *old_de2;
 
-               old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de2);
+               old_bh2 = ext4_find_entry(old_dir, &old_dentry->d_name,
+                                         &old_de2, NULL);
                if (old_bh2) {
                        retval = ext4_delete_entry(handle, old_dir,
                                                   old_de2, old_bh2);
@@ -2982,17 +3165,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
        ext4_update_dx_flag(old_dir);
        if (dir_bh) {
-               PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
-                                               cpu_to_le32(new_dir->i_ino);
+               parent_de->inode = cpu_to_le32(new_dir->i_ino);
                BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-               if (is_dx(old_inode)) {
-                       retval = ext4_handle_dirty_dx_node(handle,
-                                                          old_inode,
-                                                          dir_bh);
+               if (!inlined) {
+                       if (is_dx(old_inode)) {
+                               retval = ext4_handle_dirty_dx_node(handle,
+                                                                  old_inode,
+                                                                  dir_bh);
+                       } else {
+                               retval = ext4_handle_dirty_dirent_node(handle,
+                                                       old_inode, dir_bh);
+                       }
                } else {
-                       retval = ext4_handle_dirty_dirent_node(handle,
-                                                              old_inode,
-                                                              dir_bh);
+                       retval = ext4_mark_inode_dirty(handle, old_inode);
                }
                if (retval) {
                        ext4_std_error(old_dir->i_sb, retval);
@@ -3043,23 +3228,19 @@ const struct inode_operations ext4_dir_inode_operations = {
        .mknod          = ext4_mknod,
        .rename         = ext4_rename,
        .setattr        = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
-#endif
        .get_acl        = ext4_get_acl,
        .fiemap         = ext4_fiemap,
 };
 
 const struct inode_operations ext4_special_inode_operations = {
        .setattr        = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
-#endif
        .get_acl        = ext4_get_acl,
 };
index 68e896e..0016fbc 100644
@@ -27,7 +27,6 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
-#include "ext4_extents.h"
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
@@ -111,7 +110,7 @@ static int ext4_end_io(ext4_io_end_t *io)
                inode_dio_done(inode);
        /* Wake up anyone waiting on unwritten extent conversion */
        if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
-               wake_up_all(ext4_ioend_wq(io->inode));
+               wake_up_all(ext4_ioend_wq(inode));
        return ret;
 }
 
index 47bf06a..d99387b 100644
@@ -783,7 +783,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 
        err = ext4_journal_get_write_access(handle, gdb_bh);
        if (unlikely(err))
-               goto exit_sbh;
+               goto exit_dind;
 
        err = ext4_journal_get_write_access(handle, dind);
        if (unlikely(err))
@@ -792,7 +792,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        /* ext4_reserve_inode_write() gets a reference on the iloc */
        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (unlikely(err))
-               goto exit_dindj;
+               goto exit_dind;
 
        n_group_desc = ext4_kvmalloc((gdb_num + 1) *
                                     sizeof(struct buffer_head *),
@@ -846,12 +846,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 
 exit_inode:
        ext4_kvfree(n_group_desc);
-       /* ext4_handle_release_buffer(handle, iloc.bh); */
        brelse(iloc.bh);
-exit_dindj:
-       /* ext4_handle_release_buffer(handle, dind); */
-exit_sbh:
-       /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
 exit_dind:
        brelse(dind);
 exit_bh:
@@ -969,14 +964,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
        }
 
        for (i = 0; i < reserved_gdb; i++) {
-               if ((err = ext4_journal_get_write_access(handle, primary[i]))) {
-                       /*
-                       int j;
-                       for (j = 0; j < i; j++)
-                               ext4_handle_release_buffer(handle, primary[j]);
-                        */
+               if ((err = ext4_journal_get_write_access(handle, primary[i])))
                        goto exit_bh;
-               }
        }
 
        if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
index 80928f7..3cdb0a2 100644
@@ -45,7 +45,7 @@
 #include <linux/freezer.h>
 
 #include "ext4.h"
-#include "ext4_extents.h"
+#include "ext4_extents.h"      /* Needed for trace points definition */
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
@@ -939,10 +939,11 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
                return NULL;
 
        ei->vfs_inode.i_version = 1;
-       ei->vfs_inode.i_data.writeback_index = 0;
        memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
        INIT_LIST_HEAD(&ei->i_prealloc_list);
        spin_lock_init(&ei->i_prealloc_lock);
+       ext4_es_init_tree(&ei->i_es_tree);
+       rwlock_init(&ei->i_es_lock);
        ei->i_reserved_data_blocks = 0;
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
@@ -996,9 +997,7 @@ static void init_once(void *foo)
        struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
 
        INIT_LIST_HEAD(&ei->i_orphan);
-#ifdef CONFIG_EXT4_FS_XATTR
        init_rwsem(&ei->xattr_sem);
-#endif
        init_rwsem(&ei->i_data_sem);
        inode_init_once(&ei->vfs_inode);
 }
@@ -1031,6 +1030,7 @@ void ext4_clear_inode(struct inode *inode)
        clear_inode(inode);
        dquot_drop(inode);
        ext4_discard_preallocations(inode);
+       ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        if (EXT4_I(inode)->jinode) {
                jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
                                               EXT4_I(inode)->jinode);
@@ -1447,13 +1447,8 @@ static const struct mount_opts {
        {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_DATAJ},
        {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_DATAJ},
        {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, MOPT_DATAJ},
-#ifdef CONFIG_EXT4_FS_XATTR
        {Opt_user_xattr, EXT4_MOUNT_XATTR_USER, MOPT_SET},
        {Opt_nouser_xattr, EXT4_MOUNT_XATTR_USER, MOPT_CLEAR},
-#else
-       {Opt_user_xattr, 0, MOPT_NOSUPPORT},
-       {Opt_nouser_xattr, 0, MOPT_NOSUPPORT},
-#endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
        {Opt_acl, EXT4_MOUNT_POSIX_ACL, MOPT_SET},
        {Opt_noacl, EXT4_MOUNT_POSIX_ACL, MOPT_CLEAR},
@@ -3202,7 +3197,6 @@ int ext4_calculate_overhead(struct super_block *sb)
        ext4_fsblk_t overhead = 0;
        char *buf = (char *) get_zeroed_page(GFP_KERNEL);
 
-       memset(buf, 0, PAGE_SIZE);
        if (!buf)
                return -ENOMEM;
 
@@ -3256,7 +3250,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        unsigned int i;
        int needs_recovery, has_huge_files, has_bigalloc;
        __u64 blocks_count;
-       int err;
+       int err = 0;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
        ext4_group_t first_not_zeroed;
 
@@ -3272,9 +3266,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        sb->s_fs_info = sbi;
        sbi->s_sb = sb;
-       sbi->s_mount_opt = 0;
-       sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
-       sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
        sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
        sbi->s_sb_block = sb_block;
        if (sb->s_bdev->bd_part)
@@ -3285,6 +3276,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        for (cp = sb->s_id; (cp = strchr(cp, '/'));)
                *cp = '!';
 
+       /* -EINVAL is default */
        ret = -EINVAL;
        blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
        if (!blocksize) {
@@ -3369,9 +3361,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (def_mount_opts & EXT4_DEFM_UID16)
                set_opt(sb, NO_UID32);
        /* xattr user namespace & acls are now defaulted on */
-#ifdef CONFIG_EXT4_FS_XATTR
        set_opt(sb, XATTR_USER);
-#endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
        set_opt(sb, POSIX_ACL);
 #endif
@@ -3662,7 +3652,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         " too large to mount safely on this system");
                if (sizeof(sector_t) < 8)
                        ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
-               ret = err;
                goto failed_mount;
        }
 
@@ -3770,7 +3759,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        }
        if (err) {
                ext4_msg(sb, KERN_ERR, "insufficient memory");
-               ret = err;
                goto failed_mount3;
        }
 
@@ -3801,7 +3789,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
        INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
        mutex_init(&sbi->s_orphan_lock);
-       sbi->s_resize_flags = 0;
 
        sb->s_root = NULL;
 
@@ -3897,8 +3884,8 @@ no_journal:
        if (es->s_overhead_clusters)
                sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
        else {
-               ret = ext4_calculate_overhead(sb);
-               if (ret)
+               err = ext4_calculate_overhead(sb);
+               if (err)
                        goto failed_mount_wq;
        }
 
@@ -3910,6 +3897,7 @@ no_journal:
                alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!EXT4_SB(sb)->dio_unwritten_wq) {
                printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
+               ret = -ENOMEM;
                goto failed_mount_wq;
        }
 
@@ -4012,12 +4000,20 @@ no_journal:
        /* Enable quota usage during mount. */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
            !(sb->s_flags & MS_RDONLY)) {
-               ret = ext4_enable_quotas(sb);
-               if (ret)
+               err = ext4_enable_quotas(sb);
+               if (err)
                        goto failed_mount7;
        }
 #endif  /* CONFIG_QUOTA */
 
+       if (test_opt(sb, DISCARD)) {
+               struct request_queue *q = bdev_get_queue(sb->s_bdev);
+               if (!blk_queue_discard(q))
+                       ext4_msg(sb, KERN_WARNING,
+                                "mounting with \"discard\" option, but "
+                                "the device does not support discard");
+       }
+
        ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
                 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
                 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
@@ -4084,7 +4080,7 @@ out_fail:
        kfree(sbi);
 out_free_orig:
        kfree(orig_data);
-       return ret;
+       return err ? err : ret;
 }
 
 /*
@@ -4790,7 +4786,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 
        buf->f_type = EXT4_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
+       buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
        bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
                percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
        /* prevent underflow in case that few free space is available */
@@ -5282,6 +5278,7 @@ static int __init ext4_init_fs(void)
        ext4_li_info = NULL;
        mutex_init(&ext4_li_mtx);
 
+       /* Build-time check for flags consistency */
        ext4_check_flag_values();
 
        for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
@@ -5289,9 +5286,14 @@ static int __init ext4_init_fs(void)
                init_waitqueue_head(&ext4__ioend_wq[i]);
        }
 
-       err = ext4_init_pageio();
+       err = ext4_init_es();
        if (err)
                return err;
+
+       err = ext4_init_pageio();
+       if (err)
+               goto out7;
+
        err = ext4_init_system_zone();
        if (err)
                goto out6;
@@ -5341,6 +5343,9 @@ out5:
        ext4_exit_system_zone();
 out6:
        ext4_exit_pageio();
+out7:
+       ext4_exit_es();
+
        return err;
 }
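
An illustrative aside, not part of the patch: the new out7 label slots ext4_exit_es() into the usual kernel goto-chain unwind, where each failing init step jumps to the label that tears down everything initialised before it, in reverse order. A self-contained sketch of the shape, with stand-in step() allocations instead of the real ext4 init routines:

#include <stdio.h>
#include <stdlib.h>

static void *step(int fail)
{
	return fail ? NULL : malloc(1);
}

int main(void)
{
	void *es, *pageio, *zone;
	int err = -1;

	es = step(0);			/* like ext4_init_es() */
	if (!es)
		return err;
	pageio = step(0);		/* like ext4_init_pageio() */
	if (!pageio)
		goto out_es;		/* like the new out7 label */
	zone = step(1);			/* simulate a late failure */
	if (!zone)
		goto out_pageio;	/* like out6 */

	puts("all initialised");
	free(zone);
	free(pageio);
	free(es);
	return 0;

out_pageio:
	free(pageio);			/* ext4_exit_pageio() analogue */
out_es:
	free(es);			/* ext4_exit_es() analogue */
	return err;
}
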
 
index ed9354a..ff37119 100644
@@ -35,22 +35,18 @@ const struct inode_operations ext4_symlink_inode_operations = {
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .setattr        = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
-#endif
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = ext4_follow_link,
        .setattr        = ext4_setattr,
-#ifdef CONFIG_EXT4_FS_XATTR
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
-#endif
 };
index 2cdb98d..3a91ebc 100644
 #include "xattr.h"
 #include "acl.h"
 
-#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
-#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
-#define BFIRST(bh) ENTRY(BHDR(bh)+1)
-#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-
 #ifdef EXT4_XATTR_DEBUG
 # define ea_idebug(inode, f...) do { \
                printk(KERN_DEBUG "inode %s:%lu: ", \
@@ -312,7 +307,7 @@ cleanup:
        return error;
 }
 
-static int
+int
 ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
                     void *buffer, size_t buffer_size)
 {
@@ -581,21 +576,6 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
        return (*min_offs - ((void *)last - base) - sizeof(__u32));
 }
 
-struct ext4_xattr_info {
-       int name_index;
-       const char *name;
-       const void *value;
-       size_t value_len;
-};
-
-struct ext4_xattr_search {
-       struct ext4_xattr_entry *first;
-       void *base;
-       void *end;
-       struct ext4_xattr_entry *here;
-       int not_found;
-};
-
 static int
 ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
 {
@@ -648,9 +628,14 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
                                   size. Just replace. */
                                s->here->e_value_size =
                                        cpu_to_le32(i->value_len);
-                               memset(val + size - EXT4_XATTR_PAD, 0,
-                                      EXT4_XATTR_PAD); /* Clear pad bytes. */
-                               memcpy(val, i->value, i->value_len);
+                               if (i->value == EXT4_ZERO_XATTR_VALUE) {
+                                       memset(val, 0, size);
+                               } else {
+                                       /* Clear pad bytes first. */
+                                       memset(val + size - EXT4_XATTR_PAD, 0,
+                                              EXT4_XATTR_PAD);
+                                       memcpy(val, i->value, i->value_len);
+                               }
                                return 0;
                        }
 
@@ -689,9 +674,14 @@ ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
                        size_t size = EXT4_XATTR_SIZE(i->value_len);
                        void *val = s->base + min_offs - size;
                        s->here->e_value_offs = cpu_to_le16(min_offs - size);
-                       memset(val + size - EXT4_XATTR_PAD, 0,
-                              EXT4_XATTR_PAD); /* Clear the pad bytes. */
-                       memcpy(val, i->value, i->value_len);
+                       if (i->value == EXT4_ZERO_XATTR_VALUE) {
+                               memset(val, 0, size);
+                       } else {
+                               /* Clear the pad bytes first. */
+                               memset(val + size - EXT4_XATTR_PAD, 0,
+                                      EXT4_XATTR_PAD);
+                               memcpy(val, i->value, i->value_len);
+                       }
                }
        }
        return 0;
@@ -794,7 +784,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        int offset = (char *)s->here - bs->bh->b_data;
 
                        unlock_buffer(bs->bh);
-                       ext4_handle_release_buffer(handle, bs->bh);
                        if (ce) {
                                mb_cache_entry_release(ce);
                                ce = NULL;
@@ -950,14 +939,8 @@ bad_block:
 #undef header
 }
 
-struct ext4_xattr_ibody_find {
-       struct ext4_xattr_search s;
-       struct ext4_iloc iloc;
-};
-
-static int
-ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
-                     struct ext4_xattr_ibody_find *is)
+int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+                         struct ext4_xattr_ibody_find *is)
 {
        struct ext4_xattr_ibody_header *header;
        struct ext4_inode *raw_inode;
@@ -985,10 +968,47 @@ ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
        return 0;
 }
 
-static int
-ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
-                    struct ext4_xattr_info *i,
-                    struct ext4_xattr_ibody_find *is)
+int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+                               struct ext4_xattr_info *i,
+                               struct ext4_xattr_ibody_find *is)
+{
+       struct ext4_xattr_ibody_header *header;
+       struct ext4_xattr_search *s = &is->s;
+       int error;
+
+       if (EXT4_I(inode)->i_extra_isize == 0)
+               return -ENOSPC;
+       error = ext4_xattr_set_entry(i, s);
+       if (error) {
+               if (error == -ENOSPC &&
+                   ext4_has_inline_data(inode)) {
+                       error = ext4_try_to_evict_inline_data(handle, inode,
+                                       EXT4_XATTR_LEN(strlen(i->name)) +
+                                       EXT4_XATTR_SIZE(i->value_len));
+                       if (error)
+                               return error;
+                       error = ext4_xattr_ibody_find(inode, i, is);
+                       if (error)
+                               return error;
+                       error = ext4_xattr_set_entry(i, s);
+               }
+               if (error)
+                       return error;
+       }
+       header = IHDR(inode, ext4_raw_inode(&is->iloc));
+       if (!IS_LAST_ENTRY(s->first)) {
+               header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
+               ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+       } else {
+               header->h_magic = cpu_to_le32(0);
+               ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
+       }
+       return 0;
+}
+
+static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+                               struct ext4_xattr_info *i,
+                               struct ext4_xattr_ibody_find *is)
 {
        struct ext4_xattr_ibody_header *header;
        struct ext4_xattr_search *s = &is->s;
@@ -1144,9 +1164,17 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
 {
        handle_t *handle;
        int error, retries = 0;
+       int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
 
 retry:
-       handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
+       /*
+        * In case of inline data, we may push the data out to a block,
+        * so reserve the journal space first.
+        */
+       if (ext4_has_inline_data(inode))
+               credits += ext4_writepage_trans_blocks(inode) + 1;
+
+       handle = ext4_journal_start(inode, credits);
        if (IS_ERR(handle)) {
                error = PTR_ERR(handle);
        } else {
index 91f31ca..69eda78 100644 (file)
@@ -21,6 +21,7 @@
 #define EXT4_XATTR_INDEX_TRUSTED               4
 #define        EXT4_XATTR_INDEX_LUSTRE                 5
 #define EXT4_XATTR_INDEX_SECURITY              6
+#define EXT4_XATTR_INDEX_SYSTEM                        7
 
 struct ext4_xattr_header {
        __le32  h_magic;        /* magic number for identification */
@@ -65,7 +66,32 @@ struct ext4_xattr_entry {
                EXT4_I(inode)->i_extra_isize))
 #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
 
-# ifdef CONFIG_EXT4_FS_XATTR
+#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
+#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
+#define BFIRST(bh) ENTRY(BHDR(bh)+1)
+#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
+
+#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+
+struct ext4_xattr_info {
+       int name_index;
+       const char *name;
+       const void *value;
+       size_t value_len;
+};
+
+struct ext4_xattr_search {
+       struct ext4_xattr_entry *first;
+       void *base;
+       void *end;
+       struct ext4_xattr_entry *here;
+       int not_found;
+};
+
+struct ext4_xattr_ibody_find {
+       struct ext4_xattr_search s;
+       struct ext4_iloc iloc;
+};
 
 extern const struct xattr_handler ext4_xattr_user_handler;
 extern const struct xattr_handler ext4_xattr_trusted_handler;
@@ -90,60 +116,82 @@ extern void ext4_exit_xattr(void);
 
 extern const struct xattr_handler *ext4_xattr_handlers[];
 
-# else  /* CONFIG_EXT4_FS_XATTR */
-
-static inline int
-ext4_xattr_get(struct inode *inode, int name_index, const char *name,
-              void *buffer, size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ext4_xattr_set(struct inode *inode, int name_index, const char *name,
-              const void *value, size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int
-ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-              const char *name, const void *value, size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline void
-ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-}
-
-static inline void
-ext4_xattr_put_super(struct super_block *sb)
-{
-}
-
-static __init inline int
-ext4_init_xattr(void)
-{
-       return 0;
-}
-
-static inline void
-ext4_exit_xattr(void)
-{
-}
-
-static inline int
-ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
-                           struct ext4_inode *raw_inode, handle_t *handle)
-{
-       return -EOPNOTSUPP;
-}
-
-#define ext4_xattr_handlers    NULL
-
-# endif  /* CONFIG_EXT4_FS_XATTR */
+extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+                                struct ext4_xattr_ibody_find *is);
+extern int ext4_xattr_ibody_get(struct inode *inode, int name_index,
+                               const char *name,
+                               void *buffer, size_t buffer_size);
+extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
+                                      struct ext4_xattr_info *i,
+                                      struct ext4_xattr_ibody_find *is);
+
+extern int ext4_has_inline_data(struct inode *inode);
+extern int ext4_get_inline_size(struct inode *inode);
+extern int ext4_get_max_inline_size(struct inode *inode);
+extern int ext4_find_inline_data_nolock(struct inode *inode);
+extern void ext4_write_inline_data(struct inode *inode,
+                                  struct ext4_iloc *iloc,
+                                  void *buffer, loff_t pos,
+                                  unsigned int len);
+extern int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
+                                   unsigned int len);
+extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
+                                unsigned int len);
+extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
+
+extern int ext4_readpage_inline(struct inode *inode, struct page *page);
+extern int ext4_try_to_write_inline_data(struct address_space *mapping,
+                                        struct inode *inode,
+                                        loff_t pos, unsigned len,
+                                        unsigned flags,
+                                        struct page **pagep);
+extern int ext4_write_inline_data_end(struct inode *inode,
+                                     loff_t pos, unsigned len,
+                                     unsigned copied,
+                                     struct page *page);
+extern struct buffer_head *
+ext4_journalled_write_inline_data(struct inode *inode,
+                                 unsigned len,
+                                 struct page *page);
+extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
+                                          struct inode *inode,
+                                          loff_t pos, unsigned len,
+                                          unsigned flags,
+                                          struct page **pagep,
+                                          void **fsdata);
+extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
+                                        unsigned len, unsigned copied,
+                                        struct page *page);
+extern int ext4_try_add_inline_entry(handle_t *handle, struct dentry *dentry,
+                                    struct inode *inode);
+extern int ext4_try_create_inline_dir(handle_t *handle,
+                                     struct inode *parent,
+                                     struct inode *inode);
+extern int ext4_read_inline_dir(struct file *filp,
+                               void *dirent, filldir_t filldir,
+                               int *has_inline_data);
+extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
+                                       const struct qstr *d_name,
+                                       struct ext4_dir_entry_2 **res_dir,
+                                       int *has_inline_data);
+extern int ext4_delete_inline_entry(handle_t *handle,
+                                   struct inode *dir,
+                                   struct ext4_dir_entry_2 *de_del,
+                                   struct buffer_head *bh,
+                                   int *has_inline_data);
+extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
+extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
+                                       struct ext4_dir_entry_2 **parent_de,
+                                       int *retval);
+extern int ext4_inline_data_fiemap(struct inode *inode,
+                                  struct fiemap_extent_info *fieinfo,
+                                  int *has_inline);
+extern int ext4_try_to_evict_inline_data(handle_t *handle,
+                                        struct inode *inode,
+                                        int needed);
+extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
+
+extern int ext4_convert_inline_data(struct inode *inode);
 
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
index 484b8d1..dbf41f9 100644
@@ -60,7 +60,6 @@ EXPORT_SYMBOL(jbd2_journal_get_create_access);
 EXPORT_SYMBOL(jbd2_journal_get_undo_access);
 EXPORT_SYMBOL(jbd2_journal_set_triggers);
 EXPORT_SYMBOL(jbd2_journal_dirty_metadata);
-EXPORT_SYMBOL(jbd2_journal_release_buffer);
 EXPORT_SYMBOL(jbd2_journal_forget);
 #if 0
 EXPORT_SYMBOL(journal_sync_buffer);
index d8da40e..42f6615 100644
@@ -1207,17 +1207,6 @@ out:
        return ret;
 }
 
-/*
- * jbd2_journal_release_buffer: undo a get_write_access without any buffer
- * updates, if the update decided in the end that it didn't need access.
- *
- */
-void
-jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-       BUFFER_TRACE(bh, "entry");
-}
-
 /**
  * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
  * @handle: transaction handle
index 9cc4a3f..bc3968f 100644
@@ -193,19 +193,15 @@ static int nfs_idmap_init_keyring(void)
        if (!cred)
                return -ENOMEM;
 
-       keyring = key_alloc(&key_type_keyring, ".id_resolver", 0, 0, cred,
-                            (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-                            KEY_USR_VIEW | KEY_USR_READ,
-                            KEY_ALLOC_NOT_IN_QUOTA);
+       keyring = keyring_alloc(".id_resolver", 0, 0, cred,
+                               (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                               KEY_USR_VIEW | KEY_USR_READ,
+                               KEY_ALLOC_NOT_IN_QUOTA, NULL);
        if (IS_ERR(keyring)) {
                ret = PTR_ERR(keyring);
                goto failed_put_cred;
        }
 
-       ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
-       if (ret < 0)
-               goto failed_put_key;
-
        ret = register_key_type(&key_type_id_resolver);
        if (ret < 0)
                goto failed_put_key;
index af1661f..c7314f1 100644
@@ -307,6 +307,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
        }
 }
 
+#ifdef CONFIG_BLOCK
+
 /* Return 1 if 'cmd' will block on frozen filesystem */
 static int quotactl_cmd_write(int cmd)
 {
@@ -322,6 +324,8 @@ static int quotactl_cmd_write(int cmd)
        return 1;
 }
 
+#endif /* CONFIG_BLOCK */
+
 /*
  * look up a superblock on which quota ops will be performed
  * - use the name of a block device to find the superblock thereon
index df88b95..cbae1ed 100644
@@ -587,7 +587,6 @@ out:
 static sector_t inode_getblk(struct inode *inode, sector_t block,
                             int *err, int *new)
 {
-       static sector_t last_block;
        struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
        struct extent_position prev_epos, cur_epos, next_epos;
        int count = 0, startnum = 0, endnum = 0;
@@ -601,6 +600,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
        struct udf_inode_info *iinfo = UDF_I(inode);
        int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
        int lastblock = 0;
+       bool isBeyondEOF;
 
        *err = 0;
        *new = 0;
@@ -676,11 +676,10 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                return newblock;
        }
 
-       last_block = block;
        /* Are we beyond EOF? */
        if (etype == -1) {
                int ret;
-
+               isBeyondEOF = true;
                if (count) {
                        if (c)
                                laarr[0] = laarr[1];
@@ -718,11 +717,11 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                        memset(&laarr[c].extLocation, 0x00,
                                sizeof(struct kernel_lb_addr));
                        count++;
-                       endnum++;
                }
                endnum = c + 1;
                lastblock = 1;
        } else {
+               isBeyondEOF = false;
                endnum = startnum = ((count > 2) ? 2 : count);
 
                /* if the current extent is in position 0,
@@ -765,10 +764,13 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                                goal, err);
                if (!newblocknum) {
                        brelse(prev_epos.bh);
+                       brelse(cur_epos.bh);
+                       brelse(next_epos.bh);
                        *err = -ENOSPC;
                        return 0;
                }
-               iinfo->i_lenExtents += inode->i_sb->s_blocksize;
+               if (isBeyondEOF)
+                       iinfo->i_lenExtents += inode->i_sb->s_blocksize;
        }
 
        /* if the extent the requested block is located in contains multiple
@@ -795,6 +797,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
        udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
 
        brelse(prev_epos.bh);
+       brelse(cur_epos.bh);
+       brelse(next_epos.bh);
 
        newblock = udf_get_pblock(inode->i_sb, newblocknum,
                                iinfo->i_location.partitionReferenceNum, 0);
index 284e808..701beab 100644
@@ -219,6 +219,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #define move_pte(pte, prot, old_addr, new_addr)        (pte)
 #endif
 
+#ifndef pte_accessible
+# define pte_accessible(pte)           ((void)(pte),1)
+#endif
+
 #ifndef flush_tlb_fix_spurious_fault
 #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
 #endif
@@ -580,6 +584,112 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 #endif
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * _PAGE_NUMA works identically to _PAGE_PROTNONE (it's actually the
+ * same bit too). It's set only when _PAGE_PRESENT is not set, and it's
+ * never set when _PAGE_PRESENT is set.
+ *
+ * pte/pmd_present() returns true if pte/pmd_numa returns true. Page
+ * fault triggers on those regions if pte/pmd_numa returns true
+ * (because _PAGE_PRESENT is not set).
+ */
+#ifndef pte_numa
+static inline int pte_numa(pte_t pte)
+{
+       return (pte_flags(pte) &
+               (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+#ifndef pmd_numa
+static inline int pmd_numa(pmd_t pmd)
+{
+       return (pmd_flags(pmd) &
+               (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
+}
+#endif
+
+/*
+ * pte/pmd_mknonnuma sets the _PAGE_ACCESSED bitflag automatically
+ * because they're called by the NUMA hinting minor page fault. If we
+ * didn't set the _PAGE_ACCESSED bitflag here, the TLB miss handler
+ * would be forced to set it later while filling the TLB after we
+ * return to userland. That would trigger a second write to memory
+ * that we optimize away by setting _PAGE_ACCESSED here.
+ */
+#ifndef pte_mknonnuma
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+       pte = pte_clear_flags(pte, _PAGE_NUMA);
+       return pte_set_flags(pte, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pmd_mknonnuma
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+       pmd = pmd_clear_flags(pmd, _PAGE_NUMA);
+       return pmd_set_flags(pmd, _PAGE_PRESENT|_PAGE_ACCESSED);
+}
+#endif
+
+#ifndef pte_mknuma
+static inline pte_t pte_mknuma(pte_t pte)
+{
+       pte = pte_set_flags(pte, _PAGE_NUMA);
+       return pte_clear_flags(pte, _PAGE_PRESENT);
+}
+#endif
+
+#ifndef pmd_mknuma
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+       pmd = pmd_set_flags(pmd, _PAGE_NUMA);
+       return pmd_clear_flags(pmd, _PAGE_PRESENT);
+}
+#endif
+#else
+extern int pte_numa(pte_t pte);
+extern int pmd_numa(pmd_t pmd);
+extern pte_t pte_mknonnuma(pte_t pte);
+extern pmd_t pmd_mknonnuma(pmd_t pmd);
+extern pte_t pte_mknuma(pte_t pte);
+extern pmd_t pmd_mknuma(pmd_t pmd);
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+#else
+static inline int pmd_numa(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline int pte_numa(pte_t pte)
+{
+       return 0;
+}
+
+static inline pte_t pte_mknonnuma(pte_t pte)
+{
+       return pte;
+}
+
+static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+{
+       return pmd;
+}
+
+static inline pte_t pte_mknuma(pte_t pte)
+{
+       return pte;
+}
+
+static inline pmd_t pmd_mknuma(pmd_t pmd)
+{
+       return pmd;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
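
An illustrative aside, not part of the patch: with made-up bit positions (the real ones are arch-specific, and _PAGE_NUMA typically aliases _PAGE_PROTNONE), the helpers above reduce to simple flag swaps: mknuma trades _PAGE_PRESENT for _PAGE_NUMA so the next access faults, and mknonnuma trades back while pre-setting _PAGE_ACCESSED, as the comment explains. A runnable demo with hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>

#define P_PRESENT  (1u << 0)	/* placeholder for _PAGE_PRESENT */
#define P_ACCESSED (1u << 1)	/* placeholder for _PAGE_ACCESSED */
#define P_NUMA     (1u << 7)	/* placeholder for _PAGE_NUMA */

typedef uint32_t demo_pte_t;

static int demo_pte_numa(demo_pte_t pte)
{
	return (pte & (P_NUMA | P_PRESENT)) == P_NUMA;
}

static demo_pte_t demo_pte_mknuma(demo_pte_t pte)
{
	return (pte | P_NUMA) & ~P_PRESENT;
}

static demo_pte_t demo_pte_mknonnuma(demo_pte_t pte)
{
	return (pte & ~P_NUMA) | P_PRESENT | P_ACCESSED;
}

int main(void)
{
	demo_pte_t pte = P_PRESENT;

	pte = demo_pte_mknuma(pte);	/* arm the hinting fault */
	printf("numa=%d present=%d\n",
	       demo_pte_numa(pte), !!(pte & P_PRESENT));

	pte = demo_pte_mknonnuma(pte);	/* fault handler restores it */
	printf("numa=%d present=%d accessed=%d\n",
	       demo_pte_numa(pte), !!(pte & P_PRESENT),
	       !!(pte & P_ACCESSED));
	return 0;
}
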
 #endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
index 3fd8280..fad21c9 100644
@@ -1431,6 +1431,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+                                    struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1503,6 +1505,7 @@ extern unsigned int drm_debug;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
index 3fa18b7..00d78b5 100644 (file)
@@ -792,6 +792,7 @@ struct drm_mode_config {
 
        /* output poll support */
        bool poll_enabled;
+       bool poll_running;
        struct delayed_work output_poll_work;
 
        /* pointers to standard properties */
@@ -887,14 +888,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
                                                   const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for use by the fb module */
 extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -919,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
                                                struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
-                                        struct drm_property *property,
-                                        uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
-                                        struct drm_property *property,
-                                        uint64_t *value);
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
                                         struct drm_property *property,
                                         uint64_t val);
@@ -946,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern void drm_connector_attach_property(struct drm_connector *connector,
-                                         struct drm_property *property, uint64_t init_val);
 extern void drm_object_attach_property(struct drm_mode_object *obj,
                                       struct drm_property *property,
                                       uint64_t init_val);
@@ -1037,6 +1030,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                                    void *data, struct drm_file *file_priv);
 extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -1053,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
                                int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
                                int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
index e01cc80..f43d556 100644 (file)
@@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
                                          struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -162,6 +164,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
index fe06148..e8e1417 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
 
 /*
  * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ *                              aux algorithm
+ * @running: set by the algo indicating whether an i2c transfer is ongoing or
+ *          whether the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
 struct i2c_algo_dp_aux_data {
        bool running;
        u16 address;
@@ -322,4 +331,34 @@ struct i2c_algo_dp_aux_data {
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
+
+#define DP_LINK_STATUS_SIZE       6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+                         int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+                             int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+                                    int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+                                         int lane);
+
+#define DP_RECEIVER_CAP_SIZE   0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
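
A rough sketch of how the new DP helpers compose (hypothetical function;
assumes dpcd[] was already read from the sink's receiver capability block,
and that DP's 8b/10b coding is the only overhead accounted for):

    /* Approximate upper bound on sink payload bandwidth. link_rate is in
     * the kHz units used by drm_dp_bw_code_to_link_rate(). */
    static int max_dp_payload_kbps(u8 dpcd[DP_RECEIVER_CAP_SIZE])
    {
            int link_rate = drm_dp_max_link_rate(dpcd);
            u8 lanes = drm_dp_max_lane_count(dpcd);

            return link_rate * lanes * 8 / 10;      /* strip 8b/10b overhead */
    }
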
index 3650d5d..fce2ef3 100644 (file)
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
 extern void drm_ht_remove(struct drm_open_hash *ht);
 
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 #endif
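
A sketch of the intended split between writers and RCU readers (struct my_obj
and its external write-side locking are assumptions for illustration):

    struct my_obj {
            struct drm_hash_item hash;
            /* ... */
    };

    /* Writers serialize on an external lock; lookups need only RCU. */
    static struct my_obj *my_obj_lookup(struct drm_open_hash *ht,
                                        unsigned long key)
    {
            struct drm_hash_item *item;
            struct my_obj *obj = NULL;

            rcu_read_lock();
            if (!drm_ht_find_item_rcu(ht, key, &item))
                    /* A real caller would take a reference (for example with
                     * kref_get_unless_zero()) before dropping the RCU lock. */
                    obj = drm_hash_entry(item, struct my_obj, hash);
            rcu_read_unlock();
            return obj;
    }
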
index 3c13a3a..808dad2 100644 (file)
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
        int (*get_hpd)(void);
 };
 
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set to 1, invert the pixel clock
+ * @inv_vsync: if set to 1, invert the vsync signal for writeback
+ * @inv_href: if set to 1, invert the href signal
+ * @inv_hsync: if set to 1, invert the hsync signal for writeback
+ */
+struct exynos_drm_ipp_pol {
+       unsigned int inv_pclk;
+       unsigned int inv_vsync;
+       unsigned int inv_href;
+       unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+       struct exynos_drm_ipp_pol pol;
+       int clk_rate;
+};
+
 #endif /* _EXYNOS_DRM_H_ */
index 2e37e9f..6eb76a1 100644 (file)
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define        _DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
        /* Size of memory reserved for graphics by the BIOS */
        unsigned int stolen_size;
        /* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
        unsigned int do_idle_maps : 1;
        /* Share the scratch page dma with ppgtts. */
        dma_addr_t scratch_page_dma;
+       struct page *scratch_page;
        /* for ppgtt PDE access */
        u32 __iomem *gtt;
        /* needed for ioremap in drm/i915 */
@@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 #define AGP_DCACHE_MEMORY      1
 #define AGP_PHYS_MEMORY                2
 
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
index e8028ad..3cb5d84 100644 (file)
@@ -141,8 +141,6 @@ struct ttm_tt;
  * struct ttm_buffer_object
  *
  * @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
@@ -172,7 +170,6 @@ struct ttm_tt;
  * @seq_valid: The value of @val_seq is valid. This value is protected by
  * the bo_device::lru_lock.
  * @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
@@ -200,7 +197,6 @@ struct ttm_buffer_object {
 
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
-       unsigned long buffer_start;
        enum ttm_bo_type type;
        void (*destroy) (struct ttm_buffer_object *);
        unsigned long num_pages;
@@ -255,7 +251,6 @@ struct ttm_buffer_object {
         * checking NULL while reserved but not holding the mentioned lock.
         */
 
-       void *sync_obj_arg;
        void *sync_obj;
        unsigned long priv_flags;
 
@@ -342,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
@@ -355,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
-                               bool interruptible, bool no_wait_reserve,
+                               bool interruptible,
                                bool no_wait_gpu);
 
 /**
@@ -429,8 +423,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * @no_wait: Return immediately if buffer is busy.
  *
  * Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
  * Returns
  * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
@@ -472,8 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep to wait for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -505,7 +498,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
                        enum ttm_bo_type type,
                        struct ttm_placement *placement,
                        uint32_t page_alignment,
-                       unsigned long buffer_start,
                        bool interruptible,
                        struct file *persistent_swap_storage,
                        size_t acc_size,
@@ -521,8 +513,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep while waiting for GPU resources,
  * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -545,7 +535,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
                                enum ttm_bo_type type,
                                struct ttm_placement *placement,
                                uint32_t page_alignment,
-                               unsigned long buffer_start,
                                bool interruptible,
                                struct file *persistent_swap_storage,
                                struct ttm_buffer_object **p_bo);
@@ -736,4 +725,18 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo:     The buffer object to check.
+ *
+ * This function returns an indication of whether a bo is reserved, and should
+ * only be used to print an error when a bo is erroneously not reserved due to
+ * incorrect API usage, since there's no guarantee that the caller is the one
+ * holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+       return atomic_read(&bo->reserved);
+}
+
 #endif
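
Since the check is inherently racy, a plausible driver-side use (hypothetical
snippet) is limited to flagging incorrect API usage:

    if (WARN_ON_ONCE(!ttm_bo_is_reserved(bo)))
            return -EINVAL;     /* caller was required to reserve bo */
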
index d803b92..e3a43a4 100644 (file)
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
         */
        int (*move) (struct ttm_buffer_object *bo,
                     bool evict, bool interruptible,
-                    bool no_wait_reserve, bool no_wait_gpu,
+                    bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem);
 
        /**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
         * documentation.
         */
 
-       bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-       int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+       bool (*sync_obj_signaled) (void *sync_obj);
+       int (*sync_obj_wait) (void *sync_obj,
                              bool lazy, bool interruptible);
-       int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+       int (*sync_obj_flush) (void *sync_obj);
        void (*sync_obj_unref) (void **sync_obj);
        void *(*sync_obj_ref) (void *sync_obj);
 
@@ -521,8 +521,6 @@ struct ttm_bo_global {
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
@@ -556,7 +554,6 @@ struct ttm_bo_device {
         * Protected by load / firstopen / lastclose /unload sync.
         */
 
-       bool nice_mode;
        struct address_space *dev_mapping;
 
        /*
@@ -706,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
@@ -722,27 +718,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                struct ttm_placement *placement,
                                struct ttm_mem_reg *mem,
                                bool interruptible,
-                               bool no_wait_reserve, bool no_wait_gpu);
+                               bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);
 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);
 
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -918,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -933,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-                          bool evict, bool no_wait_reserve,
-                          bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+                          bool evict, bool no_wait_gpu,
+                          struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -956,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict, bool no_wait_reserve,
-                             bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+                             bool evict, bool no_wait_gpu,
+                             struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -973,10 +953,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -990,9 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                                     void *sync_obj,
-                                    void *sync_obj_arg,
-                                    bool evict, bool no_wait_reserve,
-                                    bool no_wait_gpu,
+                                    bool evict, bool no_wait_gpu,
                                     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
index 1926cae..547e19f 100644 (file)
@@ -39,8 +39,6 @@
  *
  * @head:           list head for thread-private list.
  * @bo:             refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
  * @reserved:       Indicates whether @bo has been reserved for validation.
  * @removed:        Indicates whether @bo has been removed from lru lists.
  * @put_count:      Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
 struct ttm_validate_buffer {
        struct list_head head;
        struct ttm_buffer_object *bo;
-       void *new_sync_obj_arg;
        bool reserved;
        bool removed;
        int put_count;
index d6d1da4..72dcbe8 100644 (file)
@@ -60,7 +60,6 @@ struct ttm_mem_shrink {
  * for the GPU, and this will otherwise block other workqueue tasks(?)
  * At this point we use only a single-threaded workqueue.
  * @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
  * @zones: Array of pointers to accounting zones.
@@ -80,7 +79,6 @@ struct ttm_mem_global {
        struct ttm_mem_shrink *shrink;
        struct workqueue_struct *swap_queue;
        struct work_struct work;
-       wait_queue_head_t queue;
        spinlock_t lock;
        struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
        unsigned int num_zones;
index b01c563..fc0cf06 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
+#include <linux/rcupdate.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -120,6 +121,7 @@ struct ttm_object_device;
  */
 
 struct ttm_base_object {
+       struct rcu_head rhead;
        struct drm_hash_item hash;
        enum ttm_object_type object_type;
        bool shareable;
@@ -268,4 +270,6 @@ extern struct ttm_object_device *ttm_object_device_init
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
+#define ttm_base_object_kfree(__object, __base)\
+       kfree_rcu(__object, __base.rhead)
 #endif
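
A sketch of a release path built on the new macro (struct my_user_object is a
made-up embedding; the point is that lockless RCU lookups never see the memory
freed under them):

    struct my_user_object {
            struct ttm_base_object base;
            /* ... driver state ... */
    };

    static void my_user_object_release(struct ttm_base_object **p_base)
    {
            struct my_user_object *obj =
                    container_of(*p_base, struct my_user_object, base);

            *p_base = NULL;
            ttm_base_object_kfree(obj, base);  /* kfree_rcu(obj, base.rhead) */
    }
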
index 2a9a9ab..238521a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/writeback.h>
 #include <linux/atomic.h>
 #include <linux/sysctl.h>
+#include <linux/mutex.h>
 
 struct page;
 struct device;
@@ -105,6 +106,9 @@ struct backing_dev_info {
 
        struct timer_list laptop_mode_wb_timer;
 
+       cpumask_t *flusher_cpumask; /* used for writeback thread scheduling */
+       struct mutex flusher_cpumask_lock;
+
 #ifdef CONFIG_DEBUG_FS
        struct dentry *debug_dir;
        struct dentry *debug_stats;
index 1756001..acb4f7b 100644 (file)
@@ -378,6 +378,12 @@ struct request_queue {
 
        unsigned int            nr_sorted;
        unsigned int            in_flight[2];
+       /*
+        * Number of active block driver functions for which blk_drain_queue()
+        * must wait. Must be incremented around functions that unlock the
+        * queue_lock internally, e.g. scsi_request_fn().
+        */
+       unsigned int            request_fn_active;
 
        unsigned int            rq_timeout;
        struct timer_list       timeout;
@@ -437,7 +443,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED     2       /* queue is stopped */
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL   4       /* write queue has been filled */
-#define QUEUE_FLAG_DEAD                5       /* queue being torn down */
+#define QUEUE_FLAG_DYING       5       /* queue being torn down */
 #define QUEUE_FLAG_BYPASS      6       /* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI                7       /* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8      /* disable merge attempts */
@@ -452,6 +458,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16      /* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17      /* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18      /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD        19      /* queue tear-down finished */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -521,6 +528,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)      test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)    test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -1180,13 +1188,14 @@ static inline int queue_discard_alignment(struct request_queue *q)
 
 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-       unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+       sector_t alignment = sector << 9;
+       alignment = sector_div(alignment, lim->discard_granularity);
 
        if (!lim->max_discard_sectors)
                return 0;
 
-       return (lim->discard_granularity + lim->discard_alignment - alignment)
-               & (lim->discard_granularity - 1);
+       alignment = lim->discard_granularity + lim->discard_alignment - alignment;
+       return sector_div(alignment, lim->discard_granularity);
 }
 
 static inline int bdev_discard_alignment(struct block_device *bdev)
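
The rewrite above drops two assumptions baked into the old bitmask arithmetic:
that discard_granularity is a power of two, and that the shifted byte offset
fits in 32 bits. The idiom, in isolation (illustrative fragment only):

    sector_t offset = sector << 9;  /* byte offset; sector_t is 64 bits wide */
    unsigned int rem;

    /* sector_div() divides 'offset' in place and returns the remainder,
     * which is correct for any granularity, not just powers of two. */
    rem = sector_div(offset, lim->discard_granularity);
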
index 4d0fb3d..a226652 100644 (file)
@@ -67,6 +67,5 @@ void bsg_job_done(struct bsg_job *job, int result,
 int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
                    bsg_job_fn *job_fn, int dd_job_size);
 void bsg_request_fn(struct request_queue *q);
-void bsg_goose_queue(struct request_queue *q);
 
 #endif
index ebbed2c..0142aac 100644 (file)
@@ -77,21 +77,6 @@ extern int in_group_p(kgid_t);
 extern int in_egroup_p(kgid_t);
 
 /*
- * The common credentials for a thread group
- * - shared by CLONE_THREAD
- */
-#ifdef CONFIG_KEYS
-struct thread_group_cred {
-       atomic_t        usage;
-       pid_t           tgid;                   /* thread group process ID */
-       spinlock_t      lock;
-       struct key __rcu *session_keyring;      /* keyring inherited over fork */
-       struct key      *process_keyring;       /* keyring private to this process */
-       struct rcu_head rcu;                    /* RCU deletion hook */
-};
-#endif
-
-/*
  * The security context of a task
  *
  * The parts of the context break down into two categories:
@@ -139,6 +124,8 @@ struct cred {
 #ifdef CONFIG_KEYS
        unsigned char   jit_keyring;    /* default keyring to attach requested
                                         * keys to */
+       struct key __rcu *session_keyring; /* keyring inherited over fork */
+       struct key      *process_keyring; /* keyring private to this process */
        struct key      *thread_keyring; /* keyring private to this thread */
        struct key      *request_key_auth; /* assumed request_key authority */
        struct thread_group_cred *tgcred; /* thread-group shared credentials */
index f83f793..c8e1831 100644 (file)
@@ -17,6 +17,7 @@ enum dma_attr {
        DMA_ATTR_NON_CONSISTENT,
        DMA_ATTR_NO_KERNEL_MAPPING,
        DMA_ATTR_SKIP_CPU_SYNC,
+       DMA_ATTR_FORCE_CONTIGUOUS,
        DMA_ATTR_MAX,
 };
 
index 02a6941..8b84916 100644 (file)
@@ -587,6 +587,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
                struct resource *data_resource, struct resource *bss_resource);
+extern unsigned long efi_get_time(void);
+extern int efi_set_rtc_mmss(unsigned long nowtime);
 extern void efi_reserve_boot_services(void);
 extern struct efi_memory_map memmap;
 
index 2c26c14..fcb51c8 100644 (file)
@@ -23,7 +23,9 @@
 #ifndef __LINUX_EXTCON_H__
 #define __LINUX_EXTCON_H__
 
+#include <linux/device.h>
 #include <linux/notifier.h>
+#include <linux/sysfs.h>
 
 #define SUPPORTED_CABLE_MAX    32
 #define CABLE_NAME_MAX         30
@@ -74,12 +76,12 @@ struct extcon_cable;
 
 /**
  * struct extcon_dev - An extcon device represents one external connector.
- * @name       The name of this extcon device. Parent device name is used
+ * @name:      The name of this extcon device. Parent device name is used
  *             if NULL.
- * @supported_cable    Array of supported cable names ending with NULL.
+ * @supported_cable:   Array of supported cable names ending with NULL.
  *                     If supported_cable is NULL, cable name related APIs
  *                     are disabled.
- * @mutually_exclusive Array of mutually exclusive set of cables that cannot
+ * @mutually_exclusive:        Array of mutually exclusive set of cables that cannot
  *                     be attached simultaneously. The array should end
  *                     with NULL or be NULL (no mutually exclusive
  *                     cables). For example, if it is { 0x7, 0x30, 0}, then,
@@ -87,21 +89,21 @@ struct extcon_cable;
  *                     be attached simultaneously. {0x7, 0} is equivalent to
  *                     {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
  *                     can be no simultaneous connections.
- * @print_name An optional callback to override the method to print the
+ * @print_name:        An optional callback to override the method to print the
  *             name of the extcon device.
- * @print_state        An optional callback to override the method to print the
+ * @print_state:       An optional callback to override the method to print the
  *             status of the extcon device.
- * @dev                Device of this extcon. Do not provide at register-time.
- * @state      Attach/detach state of this extcon. Do not provide at
+ * @dev:       Device of this extcon. Do not provide at register-time.
+ * @state:     Attach/detach state of this extcon. Do not provide at
  *             register-time
- * @nh Notifier for the state change events from this extcon
- * @entry      To support list of extcon devices so that users can search
+ * @nh:        Notifier for the state change events from this extcon
+ * @entry:     To support list of extcon devices so that users can search
  *             for extcon devices based on the extcon name.
- * @lock
- * @max_supported      Internal value to store the number of cables.
- * @extcon_dev_type    Device_type struct to provide attribute_groups
+ * @lock:
+ * @max_supported:     Internal value to store the number of cables.
+ * @extcon_dev_type:   Device_type struct to provide attribute_groups
  *                     customized for each extcon device.
- * @cables     Sysfs subdirectories. Each represents one cable.
+ * @cables:    Sysfs subdirectories. Each represents one cable.
  *
  * In most cases, users only need to provide "User initializing data" of
  * this struct when registering an extcon. In some exceptional cases,
@@ -137,12 +139,12 @@ struct extcon_dev {
 
 /**
  * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev       The extcon device
- * @cable_index        Index of this cable in the edev
- * @attr_g     Attribute group for the cable
- * @attr_name  "name" sysfs entry
- * @attr_state "state" sysfs entry
- * @attrs      Array pointing to attr_name and attr_state for attr_g
+ * @edev:      The extcon device
+ * @cable_index:       Index of this cable in the edev
+ * @attr_g:    Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state:        "state" sysfs entry
+ * @attrs:     Array pointing to attr_name and attr_state for attr_g
  */
 struct extcon_cable {
        struct extcon_dev *edev;
@@ -158,11 +160,11 @@ struct extcon_cable {
 /**
  * struct extcon_specific_cable_nb - An internal data for
  *                             extcon_register_interest().
- * @internal_nb        a notifier block bridging extcon notifier and cable notifier.
- * @user_nb    user provided notifier block for events from a specific cable.
- * @cable_index        the target cable.
- * @edev       the target extcon device.
- * @previous_value     the saved previous event value.
+ * @internal_nb:       a notifier block bridging extcon notifier and cable notifier.
+ * @user_nb:   user provided notifier block for events from a specific cable.
+ * @cable_index:       the target cable.
+ * @edev:      the target extcon device.
+ * @previous_value:    the saved previous event value.
  */
 struct extcon_specific_cable_nb {
        struct notifier_block internal_nb;
index 092dc53..1d76f8c 100644 (file)
@@ -31,7 +31,8 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
                         unsigned long new_addr, unsigned long old_end,
                         pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-                       unsigned long addr, pgprot_t newprot);
+                       unsigned long addr, pgprot_t newprot,
+                       int prot_numa);
 
 enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
@@ -111,7 +112,7 @@ extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 #define wait_split_huge_page(__anon_vma, __pmd)                                \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
-               anon_vma_lock(__anon_vma);                              \
+               anon_vma_lock_write(__anon_vma);                        \
                anon_vma_unlock(__anon_vma);                            \
                BUG_ON(pmd_trans_splitting(*____pmd) ||                 \
                       pmd_trans_huge(*____pmd));                       \
@@ -171,6 +172,10 @@ static inline struct page *compound_trans_head(struct page *page)
        }
        return page;
 }
+
+extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -209,6 +214,13 @@ static inline int pmd_trans_huge_lock(pmd_t *pmd,
 {
        return 0;
 }
+
+static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                                       unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+       return 0;
+}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
index 3e7fa1a..0c80d3f 100644 (file)
@@ -87,7 +87,7 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int write);
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pmd);
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);
 
 #else /* !CONFIG_HUGETLB_PAGE */
@@ -132,7 +132,11 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 {
 }
 
-#define hugetlb_change_protection(vma, address, end, newprot)
+static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+               unsigned long address, unsigned long end, pgprot_t newprot)
+{
+       return 0;
+}
 
 static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
index 9a5e284..1ff54b1 100644 (file)
  * address each module uses within a given i2c slave.
  */
 
-/* Slave 0 (i2c address 0x48) */
-#define TWL4030_MODULE_USB             0x00
-
-/* Slave 1 (i2c address 0x49) */
-#define TWL4030_MODULE_AUDIO_VOICE     0x01
-#define TWL4030_MODULE_GPIO            0x02
-#define TWL4030_MODULE_INTBR           0x03
-#define TWL4030_MODULE_PIH             0x04
-#define TWL4030_MODULE_TEST            0x05
-
-/* Slave 2 (i2c address 0x4a) */
-#define TWL4030_MODULE_KEYPAD          0x06
-#define TWL4030_MODULE_MADC            0x07
-#define TWL4030_MODULE_INTERRUPTS      0x08
-#define TWL4030_MODULE_LED             0x09
-#define TWL4030_MODULE_MAIN_CHARGE     0x0A
-#define TWL4030_MODULE_PRECHARGE       0x0B
-#define TWL4030_MODULE_PWM0            0x0C
-#define TWL4030_MODULE_PWM1            0x0D
-#define TWL4030_MODULE_PWMA            0x0E
-#define TWL4030_MODULE_PWMB            0x0F
-
-#define TWL5031_MODULE_ACCESSORY       0x10
-#define TWL5031_MODULE_INTERRUPTS      0x11
-
-/* Slave 3 (i2c address 0x4b) */
-#define TWL4030_MODULE_BACKUP          0x12
-#define TWL4030_MODULE_INT             0x13
-#define TWL4030_MODULE_PM_MASTER       0x14
-#define TWL4030_MODULE_PM_RECEIVER     0x15
-#define TWL4030_MODULE_RTC             0x16
-#define TWL4030_MODULE_SECURED_REG     0x17
+enum twl4030_module_ids {
+       TWL4030_MODULE_USB = 0,         /* Slave 0 (i2c address 0x48) */
+       TWL4030_MODULE_AUDIO_VOICE,     /* Slave 1 (i2c address 0x49) */
+       TWL4030_MODULE_GPIO,
+       TWL4030_MODULE_INTBR,
+       TWL4030_MODULE_PIH,
+
+       TWL4030_MODULE_TEST,
+       TWL4030_MODULE_KEYPAD,          /* Slave 2 (i2c address 0x4a) */
+       TWL4030_MODULE_MADC,
+       TWL4030_MODULE_INTERRUPTS,
+       TWL4030_MODULE_LED,
+
+       TWL4030_MODULE_MAIN_CHARGE,
+       TWL4030_MODULE_PRECHARGE,
+       TWL4030_MODULE_PWM0,
+       TWL4030_MODULE_PWM1,
+       TWL4030_MODULE_PWMA,
+
+       TWL4030_MODULE_PWMB,
+       TWL5031_MODULE_ACCESSORY,
+       TWL5031_MODULE_INTERRUPTS,
+       TWL4030_MODULE_BACKUP,          /* Slave 3 (i2c address 0x4b) */
+       TWL4030_MODULE_INT,
+
+       TWL4030_MODULE_PM_MASTER,
+       TWL4030_MODULE_PM_RECEIVER,
+       TWL4030_MODULE_RTC,
+       TWL4030_MODULE_SECURED_REG,
+       TWL4030_MODULE_LAST,
+};
 
+/* Similar functionalities implemented in TWL4030/6030 */
 #define TWL_MODULE_USB         TWL4030_MODULE_USB
-#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
 #define TWL_MODULE_PIH         TWL4030_MODULE_PIH
-#define TWL_MODULE_MADC                TWL4030_MODULE_MADC
 #define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
 #define TWL_MODULE_PM_MASTER   TWL4030_MODULE_PM_MASTER
 #define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
 #define TWL_MODULE_RTC         TWL4030_MODULE_RTC
 #define TWL_MODULE_PWM         TWL4030_MODULE_PWM0
+#define TWL_MODULE_LED         TWL4030_MODULE_LED
 
-#define TWL6030_MODULE_ID0     0x0D
-#define TWL6030_MODULE_ID1     0x0E
-#define TWL6030_MODULE_ID2     0x0F
+#define TWL6030_MODULE_ID0     13
+#define TWL6030_MODULE_ID1     14
+#define TWL6030_MODULE_ID2     15
 
 #define GPIO_INTR_OFFSET       0
 #define KEYPAD_INTR_OFFSET     1
diff --git a/include/linux/input/ti_am335x_tsc.h b/include/linux/input/ti_am335x_tsc.h
new file mode 100644 (file)
index 0000000..49269a2
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef __LINUX_TI_AM335X_TSC_H
+#define __LINUX_TI_AM335X_TSC_H
+
+/**
+ * struct tsc_data - Touchscreen wire configuration
+ * @wires:             Wires refer to application modes
+ *                     i.e. 4/5/8 wire touchscreen support
+ *                     on the platform.
+ * @x_plate_resistance:        X plate resistance.
+ * @steps_to_configure:        The sequencer supports a total of
+ *                     16 programmable steps.
+ *                     A step configured to read a single
+ *                     co-ordinate value can be applied
+ *                     multiple times for better results.
+ */
+
+struct tsc_data {
+       int wires;
+       int x_plate_resistance;
+       int steps_to_configure;
+};
+
+#endif
diff --git a/include/linux/input/ti_tscadc.h b/include/linux/input/ti_tscadc.h
deleted file mode 100644 (file)
index b10a527..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __LINUX_TI_TSCADC_H
-#define __LINUX_TI_TSCADC_H
-
-/**
- * struct tsc_data     Touchscreen wire configuration
- * @wires:             Wires refer to application modes
- *                     i.e. 4/5/8 wire touchscreen support
- *                     on the platform.
- * @x_plate_resistance:        X plate resistance.
- */
-
-struct tsc_data {
-       int wires;
-       int x_plate_resistance;
-};
-
-#endif
index 3efc43f..1be23d9 100644 (file)
@@ -1096,7 +1096,6 @@ extern int         jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
 void            jbd2_journal_set_triggers(struct buffer_head *,
                                           struct jbd2_buffer_trigger_type *type);
 extern int      jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void     jbd2_journal_release_buffer (handle_t *, struct buffer_head *);
 extern int      jbd2_journal_forget (handle_t *, struct buffer_head *);
 extern void     journal_sync_buffer (struct buffer_head *);
 extern void     jbd2_journal_invalidatepage(journal_t *,
@@ -1303,15 +1302,21 @@ static inline int jbd_space_needed(journal_t *journal)
 
 extern int jbd_blocks_per_page(struct inode *inode);
 
+/* JBD uses a CRC32 checksum */
+#define JBD_MAX_CHECKSUM_SIZE 4
+
 static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
                              const void *address, unsigned int length)
 {
        struct {
                struct shash_desc shash;
-               char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
+               char ctx[JBD_MAX_CHECKSUM_SIZE];
        } desc;
        int err;
 
+       BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
+               JBD_MAX_CHECKSUM_SIZE);
+
        desc.shash.tfm = journal->j_chksum_driver;
        desc.shash.flags = 0;
        *(u32 *)desc.ctx = crc;
index 2393b1c..4dfde11 100644 (file)
@@ -265,6 +265,7 @@ extern int key_unlink(struct key *keyring,
 
 extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
                                 const struct cred *cred,
+                                key_perm_t perm,
                                 unsigned long flags,
                                 struct key *dest);
 
index 65af688..4972e6e 100644 (file)
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
        }
        return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*,
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure, and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+       return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
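
The comment above boils down to a lookup pattern like this sketch (struct
my_entry and its RCU-protected list are assumptions; the destructor is
expected to unlink the entry before freeing it):

    struct my_entry {
            struct kref kref;
            struct list_head node;
    };

    static struct my_entry *my_entry_lookup(struct list_head *head)
    {
            struct my_entry *e;

            rcu_read_lock();
            list_for_each_entry_rcu(e, head, node) {
                    /* A zero refcount means the entry is already on its way
                     * out; skip it rather than resurrect it. */
                    if (kref_get_unless_zero(&e->kref)) {
                            rcu_read_unlock();
                            return e;
                    }
            }
            rcu_read_unlock();
            return NULL;
    }
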
index dbd2127..9adc270 100644 (file)
@@ -188,6 +188,8 @@ static inline int vma_migratable(struct vm_area_struct *vma)
        return 1;
 }
 
+extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+
 #else
 
 struct mempolicy {};
@@ -307,5 +309,11 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
        return 0;
 }
 
+static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+                                unsigned long address)
+{
+       return -1; /* no node preference */
+}
+
 #endif /* CONFIG_NUMA */
 #endif
index ba26e99..1f6fe31 100644 (file)
 #define ARIZONA_DSP1_CLOCKING_1                  0x1101
 #define ARIZONA_DSP1_STATUS_1                    0x1104
 #define ARIZONA_DSP1_STATUS_2                    0x1105
+#define ARIZONA_DSP1_STATUS_3                    0x1106
 #define ARIZONA_DSP2_CONTROL_1                   0x1200
 #define ARIZONA_DSP2_CLOCKING_1                  0x1201
 #define ARIZONA_DSP2_STATUS_1                    0x1204
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
new file mode 100644 (file)
index 0000000..38452ce
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * AS3711 PMIC MFD driver header
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#ifndef MFD_AS3711_H
+#define MFD_AS3711_H
+
+/*
+ * Client data
+ */
+
+/* Register addresses */
+#define AS3711_SD_1_VOLTAGE            0       /* Digital Step-Down */
+#define AS3711_SD_2_VOLTAGE            1
+#define AS3711_SD_3_VOLTAGE            2
+#define AS3711_SD_4_VOLTAGE            3
+#define AS3711_LDO_1_VOLTAGE           4       /* Analog LDO */
+#define AS3711_LDO_2_VOLTAGE           5
+#define AS3711_LDO_3_VOLTAGE           6       /* Digital LDO */
+#define AS3711_LDO_4_VOLTAGE           7
+#define AS3711_LDO_5_VOLTAGE           8
+#define AS3711_LDO_6_VOLTAGE           9
+#define AS3711_LDO_7_VOLTAGE           0xa
+#define AS3711_LDO_8_VOLTAGE           0xb
+#define AS3711_SD_CONTROL              0x10
+#define AS3711_GPIO_SIGNAL_OUT         0x20
+#define AS3711_GPIO_SIGNAL_IN          0x21
+#define AS3711_SD_CONTROL_1            0x30
+#define AS3711_SD_CONTROL_2            0x31
+#define AS3711_CURR_CONTROL            0x40
+#define AS3711_CURR1_VALUE             0x43
+#define AS3711_CURR2_VALUE             0x44
+#define AS3711_CURR3_VALUE             0x45
+#define AS3711_STEPUP_CONTROL_1                0x50
+#define AS3711_STEPUP_CONTROL_2                0x51
+#define AS3711_STEPUP_CONTROL_4                0x53
+#define AS3711_STEPUP_CONTROL_5                0x54
+#define AS3711_REG_STATUS              0x73
+#define AS3711_INTERRUPT_STATUS_1      0x77
+#define AS3711_INTERRUPT_STATUS_2      0x78
+#define AS3711_INTERRUPT_STATUS_3      0x79
+#define AS3711_CHARGER_STATUS_1                0x86
+#define AS3711_CHARGER_STATUS_2                0x87
+#define AS3711_ASIC_ID_1               0x90
+#define AS3711_ASIC_ID_2               0x91
+
+#define AS3711_MAX_REGS                        0x92
+
+/* Regulators */
+enum {
+       AS3711_REGULATOR_SD_1,
+       AS3711_REGULATOR_SD_2,
+       AS3711_REGULATOR_SD_3,
+       AS3711_REGULATOR_SD_4,
+       AS3711_REGULATOR_LDO_1,
+       AS3711_REGULATOR_LDO_2,
+       AS3711_REGULATOR_LDO_3,
+       AS3711_REGULATOR_LDO_4,
+       AS3711_REGULATOR_LDO_5,
+       AS3711_REGULATOR_LDO_6,
+       AS3711_REGULATOR_LDO_7,
+       AS3711_REGULATOR_LDO_8,
+
+       AS3711_REGULATOR_MAX,
+};
+
+struct device;
+struct regmap;
+
+struct as3711 {
+       struct device *dev;
+       struct regmap *regmap;
+};
+
+#define AS3711_MAX_STEPDOWN 4
+#define AS3711_MAX_STEPUP 2
+#define AS3711_MAX_LDO 8
+
+enum as3711_su2_feedback {
+       AS3711_SU2_VOLTAGE,
+       AS3711_SU2_CURR1,
+       AS3711_SU2_CURR2,
+       AS3711_SU2_CURR3,
+       AS3711_SU2_CURR_AUTO,
+};
+
+enum as3711_su2_fbprot {
+       AS3711_SU2_LX_SD4,
+       AS3711_SU2_GPIO2,
+       AS3711_SU2_GPIO3,
+       AS3711_SU2_GPIO4,
+};
+
+/*
+ * Platform data
+ */
+
+struct as3711_regulator_pdata {
+       struct regulator_init_data *init_data[AS3711_REGULATOR_MAX];
+};
+
+struct as3711_bl_pdata {
+       const char *su1_fb;
+       int su1_max_uA;
+       const char *su2_fb;
+       int su2_max_uA;
+       enum as3711_su2_feedback su2_feedback;
+       enum as3711_su2_fbprot su2_fbprot;
+       bool su2_auto_curr1;
+       bool su2_auto_curr2;
+       bool su2_auto_curr3;
+};
+
+struct as3711_platform_data {
+       struct as3711_regulator_pdata regulator;
+       struct as3711_bl_pdata backlight;
+};
+
+#endif
index 0507c4c..86dd93d 100644 (file)
@@ -146,4 +146,14 @@ void da9052_device_exit(struct da9052 *da9052);
 
 extern struct regmap_config da9052_regmap_config;
 
+int da9052_irq_init(struct da9052 *da9052);
+int da9052_irq_exit(struct da9052 *da9052);
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+                          irq_handler_t handler, void *data);
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data);
+
+int da9052_enable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq);
+
 #endif /* __MFD_DA9052_DA9052_H */
index c96ad68..956afa4 100644 (file)
@@ -1,4 +1,4 @@
-/*
+/*
  * da9055 declarations for DA9055 PMICs.
  *
  * Copyright(c) 2012 Dialog Semiconductor Ltd.
index f87a6c1..04e092b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012 Dialog Semiconductor Ltd.
+/* Copyright (C) 2012 Dialog Semiconductor Ltd.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
index df237ee..2b592e0 100644 (file)
@@ -1,4 +1,4 @@
-/*
+/*
  * DA9055 declarations for DA9055 PMICs.
  *
  * Copyright(c) 2012 Dialog Semiconductor Ltd.
index 36c242e..fd413cc 100644 (file)
@@ -33,6 +33,7 @@
 /* Maximum number of main interrupts */
 #define MAX_MAIN_INTERRUPT             5
 #define RC5T583_MAX_GPEDGE_REG         2
+#define RC5T583_MAX_INTERRUPT_EN_REGS  8
 #define RC5T583_MAX_INTERRUPT_MASK_REGS        9
 
 /* Interrupt enable register */
@@ -304,7 +305,7 @@ struct rc5t583 {
        uint8_t         intc_inten_reg;
 
        /* For group interrupt bits and address */
-       uint8_t         irq_en_reg[RC5T583_MAX_INTERRUPT_MASK_REGS];
+       uint8_t         irq_en_reg[RC5T583_MAX_INTERRUPT_EN_REGS];
 
        /* For gpio edge */
        uint8_t         gpedge_reg[RC5T583_MAX_GPEDGE_REG];
diff --git a/include/linux/mfd/retu.h b/include/linux/mfd/retu.h
new file mode 100644 (file)
index 0000000..1e2715d
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Retu MFD driver interface
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef __LINUX_MFD_RETU_H
+#define __LINUX_MFD_RETU_H
+
+struct retu_dev;
+
+int retu_read(struct retu_dev *, u8);
+int retu_write(struct retu_dev *, u8, u16);
+
+/* Registers */
+#define RETU_REG_WATCHDOG      0x17            /* Watchdog */
+#define RETU_REG_CC1           0x0d            /* Common control register 1 */
+#define RETU_REG_STATUS                0x16            /* Status register */
+
+#endif /* __LINUX_MFD_RETU_H */
index d179227..9a855ac 100644 (file)
 #include <linux/types.h>
 #include <linux/pci.h>
 
+enum sta2x11_mfd_plat_dev {
+       sta2x11_sctl = 0,
+       sta2x11_gpio,
+       sta2x11_scr,
+       sta2x11_time,
+       sta2x11_apbreg,
+       sta2x11_apb_soc_regs,
+       sta2x11_vic,
+       sta2x11_n_mfd_plat_devs,
+};
+
+#define STA2X11_MFD_SCTL_NAME         "sta2x11-sctl"
+#define STA2X11_MFD_GPIO_NAME         "sta2x11-gpio"
+#define STA2X11_MFD_SCR_NAME          "sta2x11-scr"
+#define STA2X11_MFD_TIME_NAME         "sta2x11-time"
+#define STA2X11_MFD_APBREG_NAME               "sta2x11-apbreg"
+#define STA2X11_MFD_APB_SOC_REGS_NAME  "sta2x11-apb-soc-regs"
+#define STA2X11_MFD_VIC_NAME          "sta2x11-vic"
+
+extern u32
+__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
+
 /*
  * The MFD PCI block includes the GPIO peripherals and other register blocks.
  * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
@@ -182,7 +204,11 @@ struct sta2x11_gpio_pdata {
  * The APB bridge has its own registers, needed by our users as well.
  * They are accessed with the following read/mask/write function.
  */
-u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+static inline u32
+sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+       return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
+}
 
 /* CAN and MLB */
 #define APBREG_BSR     0x00    /* Bridge Status Reg */
@@ -211,19 +237,45 @@ u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
  * The system controller has its own registers. Some of these are accessed
  * by our users as well, using the following read/mask/write function
  */
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+static inline
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+       return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
+}
 
 #define SCTL_SCCTL             0x00    /* System controller control register */
 #define SCTL_ARMCFG            0x04    /* ARM configuration register */
 #define SCTL_SCPLLCTL          0x08    /* PLL control status register */
+
+#define SCTL_SCPLLCTL_AUDIO_PLL_PD          BIT(1)
+#define SCTL_SCPLLCTL_FRAC_CONTROL          BIT(3)
+#define SCTL_SCPLLCTL_STRB_BYPASS           BIT(6)
+#define SCTL_SCPLLCTL_STRB_INPUT            BIT(8)
+
 #define SCTL_SCPLLFCTRL                0x0c    /* PLL frequency control register */
+
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK    0xff
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT     10
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK        7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT      21
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK        7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT      18
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK     0x03
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT       4
+
+
 #define SCTL_SCRESFRACT                0x10    /* PLL fractional input register */
+
+#define SCTL_SCRESFRACT_MASK   0x0000ffff
+
+
 #define SCTL_SCRESCTRL1                0x14    /* Peripheral reset control 1 */
 #define SCTL_SCRESXTRL2                0x18    /* Peripheral reset control 2 */
 #define SCTL_SCPEREN0          0x1c    /* Peripheral clock enable register 0 */
 #define SCTL_SCPEREN1          0x20    /* Peripheral clock enable register 1 */
 #define SCTL_SCPEREN2          0x24    /* Peripheral clock enable register 2 */
 #define SCTL_SCGRST            0x28    /* Peripheral global reset */
+#define SCTL_SCPCIECSBRST       0x2c    /* PCIe PAB CSB reset status register */
 #define SCTL_SCPCIPMCR1                0x30    /* PCI power management control 1 */
 #define SCTL_SCPCIPMCR2                0x34    /* PCI power management control 2 */
 #define SCTL_SCPCIPMSR1                0x38    /* PCI power management status 1 */
@@ -321,4 +373,146 @@ u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
 #define SCTL_SCPEREN1_I2C3             (1 << 16)
 #define SCTL_SCPEREN1_USB_PHY          (1 << 17)
 
+/*
+ * APB-SOC registers
+ */
+static inline
+u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+       return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
+}
+
+#define PCIE_EP1_FUNC3_0_INTR_REG      0x000
+#define PCIE_EP1_FUNC7_4_INTR_REG      0x004
+#define PCIE_EP2_FUNC3_0_INTR_REG      0x008
+#define PCIE_EP2_FUNC7_4_INTR_REG      0x00c
+#define PCIE_EP3_FUNC3_0_INTR_REG      0x010
+#define PCIE_EP3_FUNC7_4_INTR_REG      0x014
+#define PCIE_EP4_FUNC3_0_INTR_REG      0x018
+#define PCIE_EP4_FUNC7_4_INTR_REG      0x01c
+#define PCIE_INTR_ENABLE0_REG          0x020
+#define PCIE_INTR_ENABLE1_REG          0x024
+#define PCIE_EP1_FUNC_TC_REG           0x028
+#define PCIE_EP2_FUNC_TC_REG           0x02c
+#define PCIE_EP3_FUNC_TC_REG           0x030
+#define PCIE_EP4_FUNC_TC_REG           0x034
+#define PCIE_EP1_FUNC_F_REG            0x038
+#define PCIE_EP2_FUNC_F_REG            0x03c
+#define PCIE_EP3_FUNC_F_REG            0x040
+#define PCIE_EP4_FUNC_F_REG            0x044
+#define PCIE_PAB_AMBA_SW_RST_REG       0x048
+#define PCIE_PM_STATUS_0_PORT_0_4      0x04c
+#define PCIE_PM_STATUS_7_0_EP1         0x050
+#define PCIE_PM_STATUS_7_0_EP2         0x054
+#define PCIE_PM_STATUS_7_0_EP3         0x058
+#define PCIE_PM_STATUS_7_0_EP4         0x05c
+#define PCIE_DEV_ID_0_EP1_REG          0x060
+#define PCIE_CC_REV_ID_0_EP1_REG       0x064
+#define PCIE_DEV_ID_1_EP1_REG          0x068
+#define PCIE_CC_REV_ID_1_EP1_REG       0x06c
+#define PCIE_DEV_ID_2_EP1_REG          0x070
+#define PCIE_CC_REV_ID_2_EP1_REG       0x074
+#define PCIE_DEV_ID_3_EP1_REG          0x078
+#define PCIE_CC_REV_ID_3_EP1_REG       0x07c
+#define PCIE_DEV_ID_4_EP1_REG          0x080
+#define PCIE_CC_REV_ID_4_EP1_REG       0x084
+#define PCIE_DEV_ID_5_EP1_REG          0x088
+#define PCIE_CC_REV_ID_5_EP1_REG       0x08c
+#define PCIE_DEV_ID_6_EP1_REG          0x090
+#define PCIE_CC_REV_ID_6_EP1_REG       0x094
+#define PCIE_DEV_ID_7_EP1_REG          0x098
+#define PCIE_CC_REV_ID_7_EP1_REG       0x09c
+#define PCIE_DEV_ID_0_EP2_REG          0x0a0
+#define PCIE_CC_REV_ID_0_EP2_REG       0x0a4
+#define PCIE_DEV_ID_1_EP2_REG          0x0a8
+#define PCIE_CC_REV_ID_1_EP2_REG       0x0ac
+#define PCIE_DEV_ID_2_EP2_REG          0x0b0
+#define PCIE_CC_REV_ID_2_EP2_REG       0x0b4
+#define PCIE_DEV_ID_3_EP2_REG          0x0b8
+#define PCIE_CC_REV_ID_3_EP2_REG       0x0bc
+#define PCIE_DEV_ID_4_EP2_REG          0x0c0
+#define PCIE_CC_REV_ID_4_EP2_REG       0x0c4
+#define PCIE_DEV_ID_5_EP2_REG          0x0c8
+#define PCIE_CC_REV_ID_5_EP2_REG       0x0cc
+#define PCIE_DEV_ID_6_EP2_REG          0x0d0
+#define PCIE_CC_REV_ID_6_EP2_REG       0x0d4
+#define PCIE_DEV_ID_7_EP2_REG          0x0d8
+#define PCIE_CC_REV_ID_7_EP2_REG       0x0dc
+#define PCIE_DEV_ID_0_EP3_REG          0x0e0
+#define PCIE_CC_REV_ID_0_EP3_REG       0x0e4
+#define PCIE_DEV_ID_1_EP3_REG          0x0e8
+#define PCIE_CC_REV_ID_1_EP3_REG       0x0ec
+#define PCIE_DEV_ID_2_EP3_REG          0x0f0
+#define PCIE_CC_REV_ID_2_EP3_REG       0x0f4
+#define PCIE_DEV_ID_3_EP3_REG          0x0f8
+#define PCIE_CC_REV_ID_3_EP3_REG       0x0fc
+#define PCIE_DEV_ID_4_EP3_REG          0x100
+#define PCIE_CC_REV_ID_4_EP3_REG       0x104
+#define PCIE_DEV_ID_5_EP3_REG          0x108
+#define PCIE_CC_REV_ID_5_EP3_REG       0x10c
+#define PCIE_DEV_ID_6_EP3_REG          0x110
+#define PCIE_CC_REV_ID_6_EP3_REG       0x114
+#define PCIE_DEV_ID_7_EP3_REG          0x118
+#define PCIE_CC_REV_ID_7_EP3_REG       0x11c
+#define PCIE_DEV_ID_0_EP4_REG          0x120
+#define PCIE_CC_REV_ID_0_EP4_REG       0x124
+#define PCIE_DEV_ID_1_EP4_REG          0x128
+#define PCIE_CC_REV_ID_1_EP4_REG       0x12c
+#define PCIE_DEV_ID_2_EP4_REG          0x130
+#define PCIE_CC_REV_ID_2_EP4_REG       0x134
+#define PCIE_DEV_ID_3_EP4_REG          0x138
+#define PCIE_CC_REV_ID_3_EP4_REG       0x13c
+#define PCIE_DEV_ID_4_EP4_REG          0x140
+#define PCIE_CC_REV_ID_4_EP4_REG       0x144
+#define PCIE_DEV_ID_5_EP4_REG          0x148
+#define PCIE_CC_REV_ID_5_EP4_REG       0x14c
+#define PCIE_DEV_ID_6_EP4_REG          0x150
+#define PCIE_CC_REV_ID_6_EP4_REG       0x154
+#define PCIE_DEV_ID_7_EP4_REG          0x158
+#define PCIE_CC_REV_ID_7_EP4_REG       0x15c
+#define PCIE_SUBSYS_VEN_ID_REG         0x160
+#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
+#define PCIE_MIPHYP_SSC_EN_REG         0x168
+#define PCIE_MIPHYP_ADDR_REG           0x16c
+#define PCIE_L1_ASPM_READY_REG         0x170
+#define PCIE_EXT_CFG_RDY_REG           0x174
+#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
+#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
+#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
+#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
+#define DMA_IP_CTRL_REG                        0x324
+#define DISP_BRIDGE_PU_PD_CTRL_REG     0x328
+#define VIP_PU_PD_CTRL_REG             0x32c
+#define USB_MLB_PU_PD_CTRL_REG         0x330
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG1  0x334
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG2  0x338
+#define UART_PU_PD_CTRL_REG            0x33c
+#define ARM_Lock                       0x340
+#define SYS_IO_CHAR_REG1               0x344
+#define SYS_IO_CHAR_REG2               0x348
+#define SATA_CORE_ID_REG               0x34c
+#define SATA_CTRL_REG                  0x350
+#define I2C_HSFIX_MISC_REG             0x354
+#define SPARE2_RESERVED                        0x358
+#define SPARE3_RESERVED                        0x35c
+#define MASTER_LOCK_REG                        0x368
+#define SYSTEM_CONFIG_STATUS_REG       0x36c
+#define MSP_CLK_CTRL_REG               0x39c
+#define COMPENSATION_REG1              0x3c4
+#define COMPENSATION_REG2              0x3c8
+#define COMPENSATION_REG3              0x3cc
+#define TEST_CTL_REG                   0x3d0
+
+/*
+ * SECR (OTP) registers
+ */
+#define STA2X11_SECR_CR                        0x00
+#define STA2X11_SECR_FVR0              0x10
+#define STA2X11_SECR_FVR1              0x14
+
+extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
+                                    enum sta2x11_mfd_plat_dev index,
+                                    void __iomem **regs,
+                                    spinlock_t **lock);
+
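+/*
+ * Sketch of a sub-driver using the accessor above (hypothetical
+ * caller; error handling elided):
+ *
+ *	void __iomem *regs;
+ *	spinlock_t *lock;
+ *	u32 v;
+ *
+ *	if (sta2x11_mfd_get_regs_data(pdev, sta2x11_sctl, &regs, &lock))
+ *		return -ENODEV;
+ *	spin_lock(lock);
+ *	v = readl(regs + SCTL_SCCTL);
+ *	spin_unlock(lock);
+ */
+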
 #endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index f8d5b4d..383ac15 100644
@@ -62,6 +62,7 @@ struct stmpe_client_info;
  * @lock: lock protecting I/O operations
  * @irq_lock: IRQ bus lock
  * @dev: device, mostly for dev_dbg()
+ * @domain: IRQ domain
  * @client: client - i2c or spi
  * @ci: client specific information
  * @partnum: part number
@@ -79,6 +80,7 @@ struct stmpe {
        struct mutex lock;
        struct mutex irq_lock;
        struct device *dev;
+       struct irq_domain *domain;
        void *client;
        struct stmpe_client_info *ci;
        enum stmpe_partnum partnum;
@@ -188,7 +190,6 @@ struct stmpe_ts_platform_data {
  * @id: device id to distinguish between multiple STMPEs on the same board
  * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
  * @irq_trigger: IRQ trigger to use for the interrupt to the host
- * @irq_invert_polarity: IRQ line is connected with reversed polarity
  * @autosleep: bool to enable/disable stmpe autosleep
  * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
  * @irq_base: base IRQ number.  %STMPE_NR_IRQS irqs will be used, or
@@ -205,7 +206,6 @@ struct stmpe_platform_data {
        unsigned int blocks;
        int irq_base;
        unsigned int irq_trigger;
-       bool irq_invert_polarity;
        bool autosleep;
        bool irq_over_gpio;
        int irq_gpio;
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
new file mode 100644
index 0000000..c79ad5d
--- /dev/null
@@ -0,0 +1,152 @@
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/core.h>
+
+#define REG_RAWIRQSTATUS       0x024
+#define REG_IRQSTATUS          0x028
+#define REG_IRQENABLE          0x02C
+#define REG_IRQCLR             0x030
+#define REG_IRQWAKEUP          0x034
+#define REG_CTRL               0x040
+#define REG_ADCFSM             0x044
+#define REG_CLKDIV             0x04C
+#define REG_SE                 0x054
+#define REG_IDLECONFIG         0x058
+#define REG_CHARGECONFIG       0x05C
+#define REG_CHARGEDELAY                0x060
+#define REG_STEPCONFIG(n)      (0x64 + (((n) - 1) * 8))
+#define REG_STEPDELAY(n)       (0x68 + (((n) - 1) * 8))
+#define REG_FIFO0CNT           0xE4
+#define REG_FIFO0THR           0xE8
+#define REG_FIFO1CNT           0xF0
+#define REG_FIFO1THR           0xF4
+#define REG_FIFO0              0x100
+#define REG_FIFO1              0x200
+
+/*     Register Bitfields      */
+/* IRQ wakeup enable */
+#define IRQWKUP_ENB            BIT(0)
+
+/* Step Enable */
+#define STEPENB_MASK           (0x1FFFF << 0)
+#define STEPENB(val)           ((val) << 0)
+#define STPENB_STEPENB         STEPENB(0x1FFFF)
+#define STPENB_STEPENB_TC      STEPENB(0x1FFF)
+
+/* IRQ enable */
+#define IRQENB_HW_PEN          BIT(0)
+#define IRQENB_FIFO0THRES      BIT(2)
+#define IRQENB_FIFO1THRES      BIT(5)
+#define IRQENB_PENUP           BIT(9)
+
+/* Step Configuration */
+#define STEPCONFIG_MODE_MASK   (3 << 0)
+#define STEPCONFIG_MODE(val)   ((val) << 0)
+#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
+#define STEPCONFIG_AVG_MASK    (7 << 2)
+#define STEPCONFIG_AVG(val)    ((val) << 2)
+#define STEPCONFIG_AVG_16      STEPCONFIG_AVG(4)
+#define STEPCONFIG_XPP         BIT(5)
+#define STEPCONFIG_XNN         BIT(6)
+#define STEPCONFIG_YPP         BIT(7)
+#define STEPCONFIG_YNN         BIT(8)
+#define STEPCONFIG_XNP         BIT(9)
+#define STEPCONFIG_YPN         BIT(10)
+#define STEPCONFIG_INM_MASK    (0xF << 15)
+#define STEPCONFIG_INM(val)    ((val) << 15)
+#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
+#define STEPCONFIG_INP_MASK    (0xF << 19)
+#define STEPCONFIG_INP(val)    ((val) << 19)
+#define STEPCONFIG_INP_AN2     STEPCONFIG_INP(2)
+#define STEPCONFIG_INP_AN3     STEPCONFIG_INP(3)
+#define STEPCONFIG_INP_AN4     STEPCONFIG_INP(4)
+#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
+#define STEPCONFIG_FIFO1       BIT(26)
+
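+/*
+ * Illustrative composition of a step-configuration word from the
+ * helpers above (an arbitrary example, not a board default):
+ *
+ *	u32 stepcfg = STEPCONFIG_MODE_HWSYNC | STEPCONFIG_AVG_16 |
+ *		      STEPCONFIG_XPP | STEPCONFIG_INM_ADCREFM |
+ *		      STEPCONFIG_INP_AN4;
+ *	writel(stepcfg, tscadc_base + REG_STEPCONFIG(1));
+ */
+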
+/* Delay register */
+#define STEPDELAY_OPEN_MASK    (0x3FFFF << 0)
+#define STEPDELAY_OPEN(val)    ((val) << 0)
+#define STEPCONFIG_OPENDLY     STEPDELAY_OPEN(0x098)
+#define STEPDELAY_SAMPLE_MASK  (0xFF << 24)
+#define STEPDELAY_SAMPLE(val)  ((val) << 24)
+#define STEPCONFIG_SAMPLEDLY   STEPDELAY_SAMPLE(0)
+
+/* Charge Config */
+#define STEPCHARGE_RFP_MASK    (7 << 12)
+#define STEPCHARGE_RFP(val)    ((val) << 12)
+#define STEPCHARGE_RFP_XPUL    STEPCHARGE_RFP(1)
+#define STEPCHARGE_INM_MASK    (0xF << 15)
+#define STEPCHARGE_INM(val)    ((val) << 15)
+#define STEPCHARGE_INM_AN1     STEPCHARGE_INM(1)
+#define STEPCHARGE_INP_MASK    (0xF << 19)
+#define STEPCHARGE_INP(val)    ((val) << 19)
+#define STEPCHARGE_INP_AN1     STEPCHARGE_INP(1)
+#define STEPCHARGE_RFM_MASK    (3 << 23)
+#define STEPCHARGE_RFM(val)    ((val) << 23)
+#define STEPCHARGE_RFM_XNUR    STEPCHARGE_RFM(1)
+
+/* Charge delay */
+#define CHARGEDLY_OPEN_MASK    (0x3FFFF << 0)
+#define CHARGEDLY_OPEN(val)    ((val) << 0)
+#define CHARGEDLY_OPENDLY      CHARGEDLY_OPEN(1)
+
+/* Control register */
+#define CNTRLREG_TSCSSENB      BIT(0)
+#define CNTRLREG_STEPID                BIT(1)
+#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_POWERDOWN     BIT(4)
+#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
+#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
+#define CNTRLREG_4WIRE         CNTRLREG_AFE_CTRL(1)
+#define CNTRLREG_5WIRE         CNTRLREG_AFE_CTRL(2)
+#define CNTRLREG_8WIRE         CNTRLREG_AFE_CTRL(3)
+#define CNTRLREG_TSCENB                BIT(7)
+
+#define ADC_CLK                        3000000
+#define MAX_CLK_DIV            7
+#define TOTAL_STEPS            16
+#define TOTAL_CHANNELS         8
+
+#define TSCADC_CELLS           2
+
+enum tscadc_cells {
+       TSC_CELL,
+       ADC_CELL,
+};
+
+struct mfd_tscadc_board {
+       struct tsc_data *tsc_init;
+       struct adc_data *adc_init;
+};
+
+struct ti_tscadc_dev {
+       struct device *dev;
+       struct regmap *regmap_tscadc;
+       void __iomem *tscadc_base;
+       int irq;
+       struct mfd_cell cells[TSCADC_CELLS];
+
+       /* tsc device */
+       struct titsc *tsc;
+
+       /* adc device */
+       struct adc_device *adc;
+};
+
+#endif
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 804e280..6694cf4 100644
 #define __LINUX_MFD_TPS65090_H
 
 #include <linux/irq.h>
+#include <linux/regmap.h>
+
+/* TPS65090 IRQs */
+enum {
+       TPS65090_IRQ_VAC_STATUS_CHANGE,
+       TPS65090_IRQ_VSYS_STATUS_CHANGE,
+       TPS65090_IRQ_BAT_STATUS_CHANGE,
+       TPS65090_IRQ_CHARGING_STATUS_CHANGE,
+       TPS65090_IRQ_CHARGING_COMPLETE,
+       TPS65090_IRQ_OVERLOAD_DCDC1,
+       TPS65090_IRQ_OVERLOAD_DCDC2,
+       TPS65090_IRQ_OVERLOAD_DCDC3,
+       TPS65090_IRQ_OVERLOAD_FET1,
+       TPS65090_IRQ_OVERLOAD_FET2,
+       TPS65090_IRQ_OVERLOAD_FET3,
+       TPS65090_IRQ_OVERLOAD_FET4,
+       TPS65090_IRQ_OVERLOAD_FET5,
+       TPS65090_IRQ_OVERLOAD_FET6,
+       TPS65090_IRQ_OVERLOAD_FET7,
+};
 
 /* TPS65090 Regulator ID */
 enum {
@@ -44,20 +64,9 @@ enum {
 };
 
 struct tps65090 {
-       struct mutex            lock;
        struct device           *dev;
-       struct i2c_client       *client;
        struct regmap           *rmap;
-       struct irq_chip         irq_chip;
-       struct mutex            irq_lock;
-       int                     irq_base;
-       unsigned int            id;
-};
-
-struct tps65090_subdev_info {
-       int             id;
-       const char      *name;
-       void            *platform_data;
+       struct regmap_irq_chip_data *irq_data;
 };
 
 /*
@@ -77,8 +86,6 @@ struct tps65090_regulator_plat_data {
 
 struct tps65090_platform_data {
        int irq_base;
-       int num_subdevs;
-       struct tps65090_subdev_info *subdevs;
        struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX];
 };
 
@@ -86,9 +93,39 @@ struct tps65090_platform_data {
  * NOTE: the functions below are not intended for use outside
  * of the TPS65090 sub-device drivers
  */
-extern int tps65090_write(struct device *dev, int reg, uint8_t val);
-extern int tps65090_read(struct device *dev, int reg, uint8_t *val);
-extern int tps65090_set_bits(struct device *dev, int reg, uint8_t bit_num);
-extern int tps65090_clr_bits(struct device *dev, int reg, uint8_t bit_num);
+static inline int tps65090_write(struct device *dev, int reg, uint8_t val)
+{
+       struct tps65090 *tps = dev_get_drvdata(dev);
+
+       return regmap_write(tps->rmap, reg, val);
+}
+
+static inline int tps65090_read(struct device *dev, int reg, uint8_t *val)
+{
+       struct tps65090 *tps = dev_get_drvdata(dev);
+       unsigned int temp_val;
+       int ret;
+
+       ret = regmap_read(tps->rmap, reg, &temp_val);
+       if (!ret)
+               *val = temp_val;
+       return ret;
+}
+
+static inline int tps65090_set_bits(struct device *dev, int reg,
+               uint8_t bit_num)
+{
+       struct tps65090 *tps = dev_get_drvdata(dev);
+
+       return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
+}
+
+static inline int tps65090_clr_bits(struct device *dev, int reg,
+               uint8_t bit_num)
+{
+       struct tps65090 *tps = dev_get_drvdata(dev);
+
+       return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
+}
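+
+/*
+ * Sketch of a sub-device driver reading a register through the
+ * helpers above ("dev" is assumed to be the MFD device whose drvdata
+ * is the struct tps65090, typically pdev->dev.parent; 0x00 is an
+ * illustrative register offset, not a documented one):
+ *
+ *	uint8_t val;
+ *	int ret = tps65090_read(dev, 0x00, &val);
+ */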
 
 #endif /*__LINUX_MFD_TPS65090_H */
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
index f8da0e1..8799454 100644
@@ -96,5 +96,6 @@ extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
 extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
 extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
                           uint8_t mask);
+extern int tps6586x_irq_get_virq(struct device *dev, int irq);
 
 #endif /*__LINUX_MFD_TPS6586X_H */
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 02e894f..20e433e 100644
 #define SPARE_SPARE_MASK                               0xFF
 #define SPARE_SPARE_SHIFT                              0
 
+#define TPS65910_INT_STS_RTC_PERIOD_IT_MASK                    0x80
+#define TPS65910_INT_STS_RTC_PERIOD_IT_SHIFT                   7
+#define TPS65910_INT_STS_RTC_ALARM_IT_MASK                     0x40
+#define TPS65910_INT_STS_RTC_ALARM_IT_SHIFT                    6
+#define TPS65910_INT_STS_HOTDIE_IT_MASK                                0x20
+#define TPS65910_INT_STS_HOTDIE_IT_SHIFT                       5
+#define TPS65910_INT_STS_PWRHOLD_F_IT_MASK                     0x10
+#define TPS65910_INT_STS_PWRHOLD_F_IT_SHIFT                    4
+#define TPS65910_INT_STS_PWRON_LP_IT_MASK                      0x08
+#define TPS65910_INT_STS_PWRON_LP_IT_SHIFT                     3
+#define TPS65910_INT_STS_PWRON_IT_MASK                         0x04
+#define TPS65910_INT_STS_PWRON_IT_SHIFT                                2
+#define TPS65910_INT_STS_VMBHI_IT_MASK                         0x02
+#define TPS65910_INT_STS_VMBHI_IT_SHIFT                                1
+#define TPS65910_INT_STS_VMBDCH_IT_MASK                                0x01
+#define TPS65910_INT_STS_VMBDCH_IT_SHIFT                       0
+
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK                        0x80
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_SHIFT               7
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK                 0x40
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_SHIFT                        6
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK                    0x20
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_SHIFT                   5
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK                   0x10
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_SHIFT                  4
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK                  0x08
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_SHIFT                 3
+#define TPS65910_INT_MSK_PWRON_IT_MSK_MASK                     0x04
+#define TPS65910_INT_MSK_PWRON_IT_MSK_SHIFT                    2
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_MASK                     0x02
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_SHIFT                    1
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK                    0x01
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_SHIFT                   0
+
+#define TPS65910_INT_STS2_GPIO0_F_IT_SHIFT                     2
+#define TPS65910_INT_STS2_GPIO0_F_IT_MASK                      0x02
+#define TPS65910_INT_STS2_GPIO0_R_IT_SHIFT                     1
+#define TPS65910_INT_STS2_GPIO0_R_IT_MASK                      0x01
+
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_SHIFT                 2
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK                  0x02
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_SHIFT                 1
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK                  0x01
 
 /* Register INT_STS (0x80) register description */
 #define INT_STS_RTC_PERIOD_IT_MASK                     0x80
 #define INT_STS_RTC_ALARM_IT_SHIFT                     6
 #define INT_STS_HOTDIE_IT_MASK                         0x20
 #define INT_STS_HOTDIE_IT_SHIFT                                5
-#define INT_STS_PWRHOLD_IT_MASK                                0x10
-#define INT_STS_PWRHOLD_IT_SHIFT                       4
+#define INT_STS_PWRHOLD_R_IT_MASK                      0x10
+#define INT_STS_PWRHOLD_R_IT_SHIFT                     4
 #define INT_STS_PWRON_LP_IT_MASK                       0x08
 #define INT_STS_PWRON_LP_IT_SHIFT                      3
 #define INT_STS_PWRON_IT_MASK                          0x04
 #define INT_STS_PWRON_IT_SHIFT                         2
 #define INT_STS_VMBHI_IT_MASK                          0x02
 #define INT_STS_VMBHI_IT_SHIFT                         1
-#define INT_STS_VMBDCH_IT_MASK                         0x01
-#define INT_STS_VMBDCH_IT_SHIFT                                0
+#define INT_STS_PWRHOLD_F_IT_MASK                      0x01
+#define INT_STS_PWRHOLD_F_IT_SHIFT                     0
 
 
 /* Register INT_MSK (0x80) register description */
 #define INT_MSK_RTC_ALARM_IT_MSK_SHIFT                 6
 #define INT_MSK_HOTDIE_IT_MSK_MASK                     0x20
 #define INT_MSK_HOTDIE_IT_MSK_SHIFT                    5
-#define INT_MSK_PWRHOLD_IT_MSK_MASK                    0x10
-#define INT_MSK_PWRHOLD_IT_MSK_SHIFT                   4
+#define INT_MSK_PWRHOLD_R_IT_MSK_MASK                  0x10
+#define INT_MSK_PWRHOLD_R_IT_MSK_SHIFT                 4
 #define INT_MSK_PWRON_LP_IT_MSK_MASK                   0x08
 #define INT_MSK_PWRON_LP_IT_MSK_SHIFT                  3
 #define INT_MSK_PWRON_IT_MSK_MASK                      0x04
 #define INT_MSK_PWRON_IT_MSK_SHIFT                     2
 #define INT_MSK_VMBHI_IT_MSK_MASK                      0x02
 #define INT_MSK_VMBHI_IT_MSK_SHIFT                     1
-#define INT_MSK_VMBDCH_IT_MSK_MASK                     0x01
-#define INT_MSK_VMBDCH_IT_MSK_SHIFT                    0
+#define INT_MSK_PWRHOLD_F_IT_MSK_MASK                  0x01
+#define INT_MSK_PWRHOLD_F_IT_MSK_SHIFT                 0
 
 
 /* Register INT_STS2 (0x80) register description */
 
 
 /* Register INT_STS3 (0x80) register description */
+#define INT_STS3_PWRDN_IT_MASK                         0x80
+#define INT_STS3_PWRDN_IT_SHIFT                                7
+#define INT_STS3_VMBCH2_L_IT_MASK                      0x40
+#define INT_STS3_VMBCH2_L_IT_SHIFT                     6
+#define INT_STS3_VMBCH2_H_IT_MASK                      0x20
+#define INT_STS3_VMBCH2_H_IT_SHIFT                     5
+#define INT_STS3_WTCHDG_IT_MASK                                0x10
+#define INT_STS3_WTCHDG_IT_SHIFT                       4
 #define INT_STS3_GPIO5_F_IT_MASK                       0x08
 #define INT_STS3_GPIO5_F_IT_SHIFT                      3
 #define INT_STS3_GPIO5_R_IT_MASK                       0x04
 
 
 /* Register INT_MSK3 (0x80) register description */
+#define INT_MSK3_PWRDN_IT_MSK_MASK                     0x80
+#define INT_MSK3_PWRDN_IT_MSK_SHIFT                    7
+#define INT_MSK3_VMBCH2_L_IT_MSK_MASK                  0x40
+#define INT_MSK3_VMBCH2_L_IT_MSK_SHIFT                 6
+#define INT_MSK3_VMBCH2_H_IT_MSK_MASK                  0x20
+#define INT_MSK3_VMBCH2_H_IT_MSK_SHIFT                 5
+#define INT_MSK3_WTCHDG_IT_MSK_MASK                    0x10
+#define INT_MSK3_WTCHDG_IT_MSK_SHIFT                   4
 #define INT_MSK3_GPIO5_F_IT_MSK_MASK                   0x08
 #define INT_MSK3_GPIO5_F_IT_MSK_SHIFT                  3
 #define INT_MSK3_GPIO5_R_IT_MSK_MASK                   0x04
 #define TPS65910_IRQ_GPIO_F                            9
 #define TPS65910_NUM_IRQ                               10
 
-#define TPS65911_IRQ_VBAT_VMBDCH                       0
-#define TPS65911_IRQ_VBAT_VMBDCH2L                     1
-#define TPS65911_IRQ_VBAT_VMBDCH2H                     2
-#define TPS65911_IRQ_VBAT_VMHI                         3
-#define TPS65911_IRQ_PWRON                             4
-#define TPS65911_IRQ_PWRON_LP                          5
-#define TPS65911_IRQ_PWRHOLD_F                         6
-#define TPS65911_IRQ_PWRHOLD_R                         7
-#define TPS65911_IRQ_HOTDIE                            8
-#define TPS65911_IRQ_RTC_ALARM                         9
-#define TPS65911_IRQ_RTC_PERIOD                                10
-#define TPS65911_IRQ_GPIO0_R                           11
-#define TPS65911_IRQ_GPIO0_F                           12
-#define TPS65911_IRQ_GPIO1_R                           13
-#define TPS65911_IRQ_GPIO1_F                           14
-#define TPS65911_IRQ_GPIO2_R                           15
-#define TPS65911_IRQ_GPIO2_F                           16
-#define TPS65911_IRQ_GPIO3_R                           17
-#define TPS65911_IRQ_GPIO3_F                           18
-#define TPS65911_IRQ_GPIO4_R                           19
-#define TPS65911_IRQ_GPIO4_F                           20
-#define TPS65911_IRQ_GPIO5_R                           21
-#define TPS65911_IRQ_GPIO5_F                           22
-#define TPS65911_IRQ_WTCHDG                            23
-#define TPS65911_IRQ_PWRDN                             24
-
-#define TPS65911_NUM_IRQ                               25
-
+#define TPS65911_IRQ_PWRHOLD_F                         0
+#define TPS65911_IRQ_VBAT_VMHI                         1
+#define TPS65911_IRQ_PWRON                             2
+#define TPS65911_IRQ_PWRON_LP                          3
+#define TPS65911_IRQ_PWRHOLD_R                         4
+#define TPS65911_IRQ_HOTDIE                            5
+#define TPS65911_IRQ_RTC_ALARM                         6
+#define TPS65911_IRQ_RTC_PERIOD                                7
+#define TPS65911_IRQ_GPIO0_R                           8
+#define TPS65911_IRQ_GPIO0_F                           9
+#define TPS65911_IRQ_GPIO1_R                           10
+#define TPS65911_IRQ_GPIO1_F                           11
+#define TPS65911_IRQ_GPIO2_R                           12
+#define TPS65911_IRQ_GPIO2_F                           13
+#define TPS65911_IRQ_GPIO3_R                           14
+#define TPS65911_IRQ_GPIO3_F                           15
+#define TPS65911_IRQ_GPIO4_R                           16
+#define TPS65911_IRQ_GPIO4_F                           17
+#define TPS65911_IRQ_GPIO5_R                           18
+#define TPS65911_IRQ_GPIO5_F                           19
+#define TPS65911_IRQ_WTCHDG                            20
+#define TPS65911_IRQ_VMBCH2_H                          21
+#define TPS65911_IRQ_VMBCH2_L                          22
+#define TPS65911_IRQ_PWRDN                             23
+
+#define TPS65911_NUM_IRQ                               24
 
 /* GPIO Register Definitions */
 #define TPS65910_GPIO_DEB                              BIT(2)
@@ -836,7 +893,6 @@ struct tps65910 {
        struct device *dev;
        struct i2c_client *i2c_client;
        struct regmap *regmap;
-       struct mutex io_mutex;
        unsigned int id;
 
        /* Client devices */
@@ -848,12 +904,8 @@ struct tps65910 {
        struct tps65910_board *of_plat_data;
 
        /* IRQ Handling */
-       struct mutex irq_lock;
        int chip_irq;
-       int irq_base;
-       int irq_num;
-       u32 irq_mask;
-       struct irq_domain *domain;
+       struct regmap_irq_chip_data *irq_data;
 };
 
 struct tps65910_platform_data {
@@ -861,10 +913,6 @@ struct tps65910_platform_data {
        int irq_base;
 };
 
-int tps65910_irq_init(struct tps65910 *tps65910, int irq,
-               struct tps65910_platform_data *pdata);
-int tps65910_irq_exit(struct tps65910 *tps65910);
-
 static inline int tps65910_chip_id(struct tps65910 *tps65910)
 {
        return tps65910->id;
@@ -900,4 +948,9 @@ static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
        return regmap_update_bits(tps65910->regmap, reg, mask, val);
 }
 
+static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
+{
+       return regmap_irq_get_virq(tps65910->irq_data, irq);
+}
+
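+/*
+ * Sketch: map a chip-local interrupt to its Linux virq before
+ * requesting it (TPS65911_IRQ_RTC_ALARM is just one example source;
+ * error handling elided):
+ *
+ *	int virq = tps65910_irq_get_virq(tps65910, TPS65911_IRQ_RTC_ALARM);
+ *	if (virq <= 0)
+ *		return -ENODEV;
+ *	ret = request_threaded_irq(virq, NULL, thread_fn, IRQF_ONESHOT,
+ *				   "tps65910-rtc", data);
+ */
+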
 #endif /*  __LINUX_MFD_TPS65910_H */
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
new file mode 100644
index 0000000..2c75c9c
--- /dev/null
@@ -0,0 +1,637 @@
+/*
+ * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __LINUX_MFD_TPS80031_H
+#define __LINUX_MFD_TPS80031_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* Pull-ups/Pull-downs */
+#define TPS80031_CFG_INPUT_PUPD1                       0xF0
+#define TPS80031_CFG_INPUT_PUPD2                       0xF1
+#define TPS80031_CFG_INPUT_PUPD3                       0xF2
+#define TPS80031_CFG_INPUT_PUPD4                       0xF3
+#define TPS80031_CFG_LDO_PD1                           0xF4
+#define TPS80031_CFG_LDO_PD2                           0xF5
+#define TPS80031_CFG_SMPS_PD                           0xF6
+
+/* Real Time Clock */
+#define TPS80031_SECONDS_REG                           0x00
+#define TPS80031_MINUTES_REG                           0x01
+#define TPS80031_HOURS_REG                             0x02
+#define TPS80031_DAYS_REG                              0x03
+#define TPS80031_MONTHS_REG                            0x04
+#define TPS80031_YEARS_REG                             0x05
+#define TPS80031_WEEKS_REG                             0x06
+#define TPS80031_ALARM_SECONDS_REG                     0x08
+#define TPS80031_ALARM_MINUTES_REG                     0x09
+#define TPS80031_ALARM_HOURS_REG                       0x0A
+#define TPS80031_ALARM_DAYS_REG                                0x0B
+#define TPS80031_ALARM_MONTHS_REG                      0x0C
+#define TPS80031_ALARM_YEARS_REG                       0x0D
+#define TPS80031_RTC_CTRL_REG                          0x10
+#define TPS80031_RTC_STATUS_REG                                0x11
+#define TPS80031_RTC_INTERRUPTS_REG                    0x12
+#define TPS80031_RTC_COMP_LSB_REG                      0x13
+#define TPS80031_RTC_COMP_MSB_REG                      0x14
+#define TPS80031_RTC_RESET_STATUS_REG                  0x16
+
+/* PMC Master Module */
+#define TPS80031_PHOENIX_START_CONDITION               0x1F
+#define TPS80031_PHOENIX_MSK_TRANSITION                        0x20
+#define TPS80031_STS_HW_CONDITIONS                     0x21
+#define TPS80031_PHOENIX_LAST_TURNOFF_STS              0x22
+#define TPS80031_VSYSMIN_LO_THRESHOLD                  0x23
+#define TPS80031_VSYSMIN_HI_THRESHOLD                  0x24
+#define TPS80031_PHOENIX_DEV_ON                                0x25
+#define TPS80031_STS_PWR_GRP_STATE                     0x27
+#define TPS80031_PH_CFG_VSYSLOW                                0x28
+#define TPS80031_PH_STS_BOOT                           0x29
+#define TPS80031_PHOENIX_SENS_TRANSITION               0x2A
+#define TPS80031_PHOENIX_SEQ_CFG                       0x2B
+#define TPS80031_PRIMARY_WATCHDOG_CFG                  0x2C
+#define TPS80031_KEY_PRESS_DUR_CFG                     0x2D
+#define TPS80031_SMPS_LDO_SHORT_STS                    0x2E
+
+/* PMC Slave Module - Broadcast */
+#define TPS80031_BROADCAST_ADDR_ALL                    0x31
+#define TPS80031_BROADCAST_ADDR_REF                    0x32
+#define TPS80031_BROADCAST_ADDR_PROV                   0x33
+#define TPS80031_BROADCAST_ADDR_CLK_RST                        0x34
+
+/* PMC Slave Module  SMPS Regulators */
+#define TPS80031_SMPS4_CFG_TRANS                       0x41
+#define TPS80031_SMPS4_CFG_STATE                       0x42
+#define TPS80031_SMPS4_CFG_VOLTAGE                     0x44
+#define TPS80031_VIO_CFG_TRANS                         0x47
+#define TPS80031_VIO_CFG_STATE                         0x48
+#define TPS80031_VIO_CFG_FORCE                         0x49
+#define TPS80031_VIO_CFG_VOLTAGE                       0x4A
+#define TPS80031_VIO_CFG_STEP                          0x48
+#define TPS80031_SMPS1_CFG_TRANS                       0x53
+#define TPS80031_SMPS1_CFG_STATE                       0x54
+#define TPS80031_SMPS1_CFG_FORCE                       0x55
+#define TPS80031_SMPS1_CFG_VOLTAGE                     0x56
+#define TPS80031_SMPS1_CFG_STEP                                0x57
+#define TPS80031_SMPS2_CFG_TRANS                       0x59
+#define TPS80031_SMPS2_CFG_STATE                       0x5A
+#define TPS80031_SMPS2_CFG_FORCE                       0x5B
+#define TPS80031_SMPS2_CFG_VOLTAGE                     0x5C
+#define TPS80031_SMPS2_CFG_STEP                                0x5D
+#define TPS80031_SMPS3_CFG_TRANS                       0x65
+#define TPS80031_SMPS3_CFG_STATE                       0x66
+#define TPS80031_SMPS3_CFG_VOLTAGE                     0x68
+
+/* PMC Slave Module  LDO Regulators */
+#define TPS80031_VANA_CFG_TRANS                                0x81
+#define TPS80031_VANA_CFG_STATE                                0x82
+#define TPS80031_VANA_CFG_VOLTAGE                      0x83
+#define TPS80031_LDO2_CFG_TRANS                                0x85
+#define TPS80031_LDO2_CFG_STATE                                0x86
+#define TPS80031_LDO2_CFG_VOLTAGE                      0x87
+#define TPS80031_LDO4_CFG_TRANS                                0x89
+#define TPS80031_LDO4_CFG_STATE                                0x8A
+#define TPS80031_LDO4_CFG_VOLTAGE                      0x8B
+#define TPS80031_LDO3_CFG_TRANS                                0x8D
+#define TPS80031_LDO3_CFG_STATE                                0x8E
+#define TPS80031_LDO3_CFG_VOLTAGE                      0x8F
+#define TPS80031_LDO6_CFG_TRANS                                0x91
+#define TPS80031_LDO6_CFG_STATE                                0x92
+#define TPS80031_LDO6_CFG_VOLTAGE                      0x93
+#define TPS80031_LDOLN_CFG_TRANS                       0x95
+#define TPS80031_LDOLN_CFG_STATE                       0x96
+#define TPS80031_LDOLN_CFG_VOLTAGE                     0x97
+#define TPS80031_LDO5_CFG_TRANS                                0x99
+#define TPS80031_LDO5_CFG_STATE                                0x9A
+#define TPS80031_LDO5_CFG_VOLTAGE                      0x9B
+#define TPS80031_LDO1_CFG_TRANS                                0x9D
+#define TPS80031_LDO1_CFG_STATE                                0x9E
+#define TPS80031_LDO1_CFG_VOLTAGE                      0x9F
+#define TPS80031_LDOUSB_CFG_TRANS                      0xA1
+#define TPS80031_LDOUSB_CFG_STATE                      0xA2
+#define TPS80031_LDOUSB_CFG_VOLTAGE                    0xA3
+#define TPS80031_LDO7_CFG_TRANS                                0xA5
+#define TPS80031_LDO7_CFG_STATE                                0xA6
+#define TPS80031_LDO7_CFG_VOLTAGE                      0xA7
+
+/* PMC Slave Module  External Control */
+#define TPS80031_REGEN1_CFG_TRANS                      0xAE
+#define TPS80031_REGEN1_CFG_STATE                      0xAF
+#define TPS80031_REGEN2_CFG_TRANS                      0xB1
+#define TPS80031_REGEN2_CFG_STATE                      0xB2
+#define TPS80031_SYSEN_CFG_TRANS                       0xB4
+#define TPS80031_SYSEN_CFG_STATE                       0xB5
+
+/* PMC Slave Module  Internal Control */
+#define TPS80031_NRESPWRON_CFG_TRANS                   0xB7
+#define TPS80031_NRESPWRON_CFG_STATE                   0xB8
+#define TPS80031_CLK32KAO_CFG_TRANS                    0xBA
+#define TPS80031_CLK32KAO_CFG_STATE                    0xBB
+#define TPS80031_CLK32KG_CFG_TRANS                     0xBD
+#define TPS80031_CLK32KG_CFG_STATE                     0xBE
+#define TPS80031_CLK32KAUDIO_CFG_TRANS                 0xC0
+#define TPS80031_CLK32KAUDIO_CFG_STATE                 0xC1
+#define TPS80031_VRTC_CFG_TRANS                                0xC3
+#define TPS80031_VRTC_CFG_STATE                                0xC4
+#define TPS80031_BIAS_CFG_TRANS                                0xC6
+#define TPS80031_BIAS_CFG_STATE                                0xC7
+#define TPS80031_VSYSMIN_HI_CFG_TRANS                  0xC9
+#define TPS80031_VSYSMIN_HI_CFG_STATE                  0xCA
+#define TPS80031_RC6MHZ_CFG_TRANS                      0xCC
+#define TPS80031_RC6MHZ_CFG_STATE                      0xCD
+#define TPS80031_TMP_CFG_TRANS                         0xCF
+#define TPS80031_TMP_CFG_STATE                         0xD0
+
+/* PMC Slave Module  resources assignment */
+#define TPS80031_PREQ1_RES_ASS_A                       0xD7
+#define TPS80031_PREQ1_RES_ASS_B                       0xD8
+#define TPS80031_PREQ1_RES_ASS_C                       0xD9
+#define TPS80031_PREQ2_RES_ASS_A                       0xDA
+#define TPS80031_PREQ2_RES_ASS_B                       0xDB
+#define TPS80031_PREQ2_RES_ASS_C                       0xDC
+#define TPS80031_PREQ3_RES_ASS_A                       0xDD
+#define TPS80031_PREQ3_RES_ASS_B                       0xDE
+#define TPS80031_PREQ3_RES_ASS_C                       0xDF
+
+/* PMC Slave Module  Miscellaneous */
+#define TPS80031_SMPS_OFFSET                           0xE0
+#define TPS80031_SMPS_MULT                             0xE3
+#define TPS80031_MISC1                                 0xE4
+#define TPS80031_MISC2                                 0xE5
+#define TPS80031_BBSPOR_CFG                            0xE6
+#define TPS80031_TMP_CFG                               0xE7
+
+/* Battery Charging Controller and Indicator LED */
+#define TPS80031_CONTROLLER_CTRL2                      0xDA
+#define TPS80031_CONTROLLER_VSEL_COMP                  0xDB
+#define TPS80031_CHARGERUSB_VSYSREG                    0xDC
+#define TPS80031_CHARGERUSB_VICHRG_PC                  0xDD
+#define TPS80031_LINEAR_CHRG_STS                       0xDE
+#define TPS80031_CONTROLLER_INT_MASK                   0xE0
+#define TPS80031_CONTROLLER_CTRL1                      0xE1
+#define TPS80031_CONTROLLER_WDG                                0xE2
+#define TPS80031_CONTROLLER_STAT1                      0xE3
+#define TPS80031_CHARGERUSB_INT_STATUS                 0xE4
+#define TPS80031_CHARGERUSB_INT_MASK                   0xE5
+#define TPS80031_CHARGERUSB_STATUS_INT1                        0xE6
+#define TPS80031_CHARGERUSB_STATUS_INT2                        0xE7
+#define TPS80031_CHARGERUSB_CTRL1                      0xE8
+#define TPS80031_CHARGERUSB_CTRL2                      0xE9
+#define TPS80031_CHARGERUSB_CTRL3                      0xEA
+#define TPS80031_CHARGERUSB_STAT1                      0xEB
+#define TPS80031_CHARGERUSB_VOREG                      0xEC
+#define TPS80031_CHARGERUSB_VICHRG                     0xED
+#define TPS80031_CHARGERUSB_CINLIMIT                   0xEE
+#define TPS80031_CHARGERUSB_CTRLLIMIT1                 0xEF
+#define TPS80031_CHARGERUSB_CTRLLIMIT2                 0xF0
+#define TPS80031_LED_PWM_CTRL1                         0xF4
+#define TPS80031_LED_PWM_CTRL2                         0xF5
+
+/* USB On-The-Go  */
+#define TPS80031_BACKUP_REG                            0xFA
+#define TPS80031_USB_VENDOR_ID_LSB                     0x00
+#define TPS80031_USB_VENDOR_ID_MSB                     0x01
+#define TPS80031_USB_PRODUCT_ID_LSB                    0x02
+#define TPS80031_USB_PRODUCT_ID_MSB                    0x03
+#define TPS80031_USB_VBUS_CTRL_SET                     0x04
+#define TPS80031_USB_VBUS_CTRL_CLR                     0x05
+#define TPS80031_USB_ID_CTRL_SET                       0x06
+#define TPS80031_USB_ID_CTRL_CLR                       0x07
+#define TPS80031_USB_VBUS_INT_SRC                      0x08
+#define TPS80031_USB_VBUS_INT_LATCH_SET                        0x09
+#define TPS80031_USB_VBUS_INT_LATCH_CLR                        0x0A
+#define TPS80031_USB_VBUS_INT_EN_LO_SET                        0x0B
+#define TPS80031_USB_VBUS_INT_EN_LO_CLR                        0x0C
+#define TPS80031_USB_VBUS_INT_EN_HI_SET                        0x0D
+#define TPS80031_USB_VBUS_INT_EN_HI_CLR                        0x0E
+#define TPS80031_USB_ID_INT_SRC                                0x0F
+#define TPS80031_USB_ID_INT_LATCH_SET                  0x10
+#define TPS80031_USB_ID_INT_LATCH_CLR                  0x11
+#define TPS80031_USB_ID_INT_EN_LO_SET                  0x12
+#define TPS80031_USB_ID_INT_EN_LO_CLR                  0x13
+#define TPS80031_USB_ID_INT_EN_HI_SET                  0x14
+#define TPS80031_USB_ID_INT_EN_HI_CLR                  0x15
+#define TPS80031_USB_OTG_ADP_CTRL                      0x16
+#define TPS80031_USB_OTG_ADP_HIGH                      0x17
+#define TPS80031_USB_OTG_ADP_LOW                       0x18
+#define TPS80031_USB_OTG_ADP_RISE                      0x19
+#define TPS80031_USB_OTG_REVISION                      0x1A
+
+/* Gas Gauge */
+#define TPS80031_FG_REG_00                             0xC0
+#define TPS80031_FG_REG_01                             0xC1
+#define TPS80031_FG_REG_02                             0xC2
+#define TPS80031_FG_REG_03                             0xC3
+#define TPS80031_FG_REG_04                             0xC4
+#define TPS80031_FG_REG_05                             0xC5
+#define TPS80031_FG_REG_06                             0xC6
+#define TPS80031_FG_REG_07                             0xC7
+#define TPS80031_FG_REG_08                             0xC8
+#define TPS80031_FG_REG_09                             0xC9
+#define TPS80031_FG_REG_10                             0xCA
+#define TPS80031_FG_REG_11                             0xCB
+
+/* General Purpose ADC */
+#define TPS80031_GPADC_CTRL                            0x2E
+#define TPS80031_GPADC_CTRL2                           0x2F
+#define TPS80031_RTSELECT_LSB                          0x32
+#define TPS80031_RTSELECT_ISB                          0x33
+#define TPS80031_RTSELECT_MSB                          0x34
+#define TPS80031_GPSELECT_ISB                          0x35
+#define TPS80031_CTRL_P1                               0x36
+#define TPS80031_RTCH0_LSB                             0x37
+#define TPS80031_RTCH0_MSB                             0x38
+#define TPS80031_RTCH1_LSB                             0x39
+#define TPS80031_RTCH1_MSB                             0x3A
+#define TPS80031_GPCH0_LSB                             0x3B
+#define TPS80031_GPCH0_MSB                             0x3C
+
+/* SIM, MMC and Battery Detection */
+#define TPS80031_SIMDEBOUNCING                         0xEB
+#define TPS80031_SIMCTRL                               0xEC
+#define TPS80031_MMCDEBOUNCING                         0xED
+#define TPS80031_MMCCTRL                               0xEE
+#define TPS80031_BATDEBOUNCING                         0xEF
+
+/* Vibrator Driver and PWMs */
+#define TPS80031_VIBCTRL                               0x9B
+#define TPS80031_VIBMODE                               0x9C
+#define TPS80031_PWM1ON                                        0xBA
+#define TPS80031_PWM1OFF                               0xBB
+#define TPS80031_PWM2ON                                        0xBD
+#define TPS80031_PWM2OFF                               0xBE
+
+/* Control Interface */
+#define TPS80031_INT_STS_A                             0xD0
+#define TPS80031_INT_STS_B                             0xD1
+#define TPS80031_INT_STS_C                             0xD2
+#define TPS80031_INT_MSK_LINE_A                                0xD3
+#define TPS80031_INT_MSK_LINE_B                                0xD4
+#define TPS80031_INT_MSK_LINE_C                                0xD5
+#define TPS80031_INT_MSK_STS_A                         0xD6
+#define TPS80031_INT_MSK_STS_B                         0xD7
+#define TPS80031_INT_MSK_STS_C                         0xD8
+#define TPS80031_TOGGLE1                               0x90
+#define TPS80031_TOGGLE2                               0x91
+#define TPS80031_TOGGLE3                               0x92
+#define TPS80031_PWDNSTATUS1                           0x93
+#define TPS80031_PWDNSTATUS2                           0x94
+#define TPS80031_VALIDITY0                             0x17
+#define TPS80031_VALIDITY1                             0x18
+#define TPS80031_VALIDITY2                             0x19
+#define TPS80031_VALIDITY3                             0x1A
+#define TPS80031_VALIDITY4                             0x1B
+#define TPS80031_VALIDITY5                             0x1C
+#define TPS80031_VALIDITY6                             0x1D
+#define TPS80031_VALIDITY7                             0x1E
+
+/* Version number related register */
+#define TPS80031_JTAGVERNUM                            0x87
+#define TPS80031_EPROM_REV                             0xDF
+
+/* GPADC Trimming Bits. */
+#define TPS80031_GPADC_TRIM0                           0xCC
+#define TPS80031_GPADC_TRIM1                           0xCD
+#define TPS80031_GPADC_TRIM2                           0xCE
+#define TPS80031_GPADC_TRIM3                           0xCF
+#define TPS80031_GPADC_TRIM4                           0xD0
+#define TPS80031_GPADC_TRIM5                           0xD1
+#define TPS80031_GPADC_TRIM6                           0xD2
+#define TPS80031_GPADC_TRIM7                           0xD3
+#define TPS80031_GPADC_TRIM8                           0xD4
+#define TPS80031_GPADC_TRIM9                           0xD5
+#define TPS80031_GPADC_TRIM10                          0xD6
+#define TPS80031_GPADC_TRIM11                          0xD7
+#define TPS80031_GPADC_TRIM12                          0xD8
+#define TPS80031_GPADC_TRIM13                          0xD9
+#define TPS80031_GPADC_TRIM14                          0xDA
+#define TPS80031_GPADC_TRIM15                          0xDB
+#define TPS80031_GPADC_TRIM16                          0xDC
+#define TPS80031_GPADC_TRIM17                          0xDD
+#define TPS80031_GPADC_TRIM18                          0xDE
+
+/* TPS80031_CONTROLLER_STAT1 bit fields */
+#define TPS80031_CONTROLLER_STAT1_BAT_TEMP             0
+#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED          1
+#define TPS80031_CONTROLLER_STAT1_VBUS_DET             2
+#define TPS80031_CONTROLLER_STAT1_VAC_DET              3
+#define TPS80031_CONTROLLER_STAT1_FAULT_WDG            4
+#define TPS80031_CONTROLLER_STAT1_LINCH_GATED          6
+/* TPS80031_CONTROLLER_INT_MASK bit fields */
+#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET          0
+#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET         1
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP         2
+#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG                3
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED      4
+#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED      5
+
+#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK           0x3F
+
+/* TPS80031_PHOENIX_DEV_ON bit field */
+#define TPS80031_DEVOFF                                        0x1
+
+#define TPS80031_EXT_CONTROL_CFG_TRANS                 0
+#define TPS80031_EXT_CONTROL_CFG_STATE                 1
+
+/* State register field */
+#define TPS80031_STATE_OFF                             0x00
+#define TPS80031_STATE_ON                              0x01
+#define TPS80031_STATE_MASK                            0x03
+
+/* Trans register field */
+#define TPS80031_TRANS_ACTIVE_OFF                      0x00
+#define TPS80031_TRANS_ACTIVE_ON                       0x01
+#define TPS80031_TRANS_ACTIVE_MASK                     0x03
+#define TPS80031_TRANS_SLEEP_OFF                       0x00
+#define TPS80031_TRANS_SLEEP_ON                                0x04
+#define TPS80031_TRANS_SLEEP_MASK                      0x0C
+#define TPS80031_TRANS_OFF_OFF                         0x00
+#define TPS80031_TRANS_OFF_ACTIVE                      0x10
+#define TPS80031_TRANS_OFF_MASK                                0x30
+
+#define TPS80031_EXT_PWR_REQ           (TPS80031_PWR_REQ_INPUT_PREQ1 | \
+                                       TPS80031_PWR_REQ_INPUT_PREQ2 | \
+                                       TPS80031_PWR_REQ_INPUT_PREQ3)
+
+/* TPS80031_BBSPOR_CFG bit field */
+#define TPS80031_BBSPOR_CHG_EN                         0x8
+#define TPS80031_MAX_REGISTER                          0xFF
+
+struct i2c_client;
+
+/* Supported chips */
+enum chips {
+       TPS80031 = 0x00000001,
+       TPS80032 = 0x00000002,
+};
+
+enum {
+       TPS80031_INT_PWRON,
+       TPS80031_INT_RPWRON,
+       TPS80031_INT_SYS_VLOW,
+       TPS80031_INT_RTC_ALARM,
+       TPS80031_INT_RTC_PERIOD,
+       TPS80031_INT_HOT_DIE,
+       TPS80031_INT_VXX_SHORT,
+       TPS80031_INT_SPDURATION,
+       TPS80031_INT_WATCHDOG,
+       TPS80031_INT_BAT,
+       TPS80031_INT_SIM,
+       TPS80031_INT_MMC,
+       TPS80031_INT_RES,
+       TPS80031_INT_GPADC_RT,
+       TPS80031_INT_GPADC_SW2_EOC,
+       TPS80031_INT_CC_AUTOCAL,
+       TPS80031_INT_ID_WKUP,
+       TPS80031_INT_VBUSS_WKUP,
+       TPS80031_INT_ID,
+       TPS80031_INT_VBUS,
+       TPS80031_INT_CHRG_CTRL,
+       TPS80031_INT_EXT_CHRG,
+       TPS80031_INT_INT_CHRG,
+       TPS80031_INT_RES2,
+       TPS80031_INT_BAT_TEMP_OVRANGE,
+       TPS80031_INT_BAT_REMOVED,
+       TPS80031_INT_VBUS_DET,
+       TPS80031_INT_VAC_DET,
+       TPS80031_INT_FAULT_WDG,
+       TPS80031_INT_LINCH_GATED,
+
+	/* Keep last: used only to count the interrupt sources */
+       TPS80031_INT_NR,
+};
+
+/* TPS80031 Slave IDs */
+#define TPS80031_NUM_SLAVES                            4
+#define TPS80031_SLAVE_ID0                             0
+#define TPS80031_SLAVE_ID1                             1
+#define TPS80031_SLAVE_ID2                             2
+#define TPS80031_SLAVE_ID3                             3
+
+/* TPS80031 I2C addresses */
+#define TPS80031_I2C_ID0_ADDR                          0x12
+#define TPS80031_I2C_ID1_ADDR                          0x48
+#define TPS80031_I2C_ID2_ADDR                          0x49
+#define TPS80031_I2C_ID3_ADDR                          0x4A
+
+enum {
+       TPS80031_REGULATOR_VIO,
+       TPS80031_REGULATOR_SMPS1,
+       TPS80031_REGULATOR_SMPS2,
+       TPS80031_REGULATOR_SMPS3,
+       TPS80031_REGULATOR_SMPS4,
+       TPS80031_REGULATOR_VANA,
+       TPS80031_REGULATOR_LDO1,
+       TPS80031_REGULATOR_LDO2,
+       TPS80031_REGULATOR_LDO3,
+       TPS80031_REGULATOR_LDO4,
+       TPS80031_REGULATOR_LDO5,
+       TPS80031_REGULATOR_LDO6,
+       TPS80031_REGULATOR_LDO7,
+       TPS80031_REGULATOR_LDOLN,
+       TPS80031_REGULATOR_LDOUSB,
+       TPS80031_REGULATOR_VBUS,
+       TPS80031_REGULATOR_REGEN1,
+       TPS80031_REGULATOR_REGEN2,
+       TPS80031_REGULATOR_SYSEN,
+       TPS80031_REGULATOR_MAX,
+};
+
+/* Different configurations for the rails */
+enum {
+       /* USBLDO input selection */
+       TPS80031_USBLDO_INPUT_VSYS              = 0x00000001,
+       TPS80031_USBLDO_INPUT_PMID              = 0x00000002,
+
+       /* LDO3 output mode */
+       TPS80031_LDO3_OUTPUT_VIB                = 0x00000004,
+
+       /* VBUS configuration */
+       TPS80031_VBUS_DISCHRG_EN_PDN            = 0x00000004,
+       TPS80031_VBUS_SW_ONLY                   = 0x00000008,
+       TPS80031_VBUS_SW_N_ID                   = 0x00000010,
+};
+
+/* External controls requests */
+enum tps80031_ext_control {
+       TPS80031_PWR_REQ_INPUT_NONE             = 0x00000000,
+       TPS80031_PWR_REQ_INPUT_PREQ1            = 0x00000001,
+       TPS80031_PWR_REQ_INPUT_PREQ2            = 0x00000002,
+       TPS80031_PWR_REQ_INPUT_PREQ3            = 0x00000004,
+       TPS80031_PWR_OFF_ON_SLEEP               = 0x00000008,
+       TPS80031_PWR_ON_ON_SLEEP                = 0x00000010,
+};
+
+enum tps80031_pupd_pins {
+       TPS80031_PREQ1 = 0,
+       TPS80031_PREQ2A,
+       TPS80031_PREQ2B,
+       TPS80031_PREQ2C,
+       TPS80031_PREQ3,
+       TPS80031_NRES_WARM,
+       TPS80031_PWM_FORCE,
+       TPS80031_CHRG_EXT_CHRG_STATZ,
+       TPS80031_SIM,
+       TPS80031_MMC,
+       TPS80031_GPADC_START,
+       TPS80031_DVSI2C_SCL,
+       TPS80031_DVSI2C_SDA,
+       TPS80031_CTLI2C_SCL,
+       TPS80031_CTLI2C_SDA,
+};
+
+enum tps80031_pupd_settings {
+       TPS80031_PUPD_NORMAL,
+       TPS80031_PUPD_PULLDOWN,
+       TPS80031_PUPD_PULLUP,
+};
+
+struct tps80031 {
+       struct device           *dev;
+       unsigned long           chip_info;
+       int                     es_version;
+       struct i2c_client       *clients[TPS80031_NUM_SLAVES];
+       struct regmap           *regmap[TPS80031_NUM_SLAVES];
+       struct regmap_irq_chip_data *irq_data;
+};
+
+struct tps80031_pupd_init_data {
+       int input_pin;
+       int setting;
+};
+
+/*
+ * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @ext_ctrl_flag: External control flag for sleep/power request control.
+ * @config_flags: Configuration flags for the rails;
+ *               an OR of the config enums above.
+ */
+
+struct tps80031_regulator_platform_data {
+       struct regulator_init_data *reg_init_data;
+       unsigned int ext_ctrl_flag;
+       unsigned int config_flags;
+};
+
+struct tps80031_platform_data {
+       int irq_base;
+       bool use_power_off;
+       struct tps80031_pupd_init_data *pupd_init_data;
+       int pupd_init_data_size;
+       struct tps80031_regulator_platform_data
+                       *regulator_pdata[TPS80031_REGULATOR_MAX];
+};
+
+static inline int tps80031_write(struct device *dev, int sid,
+               int reg, uint8_t val)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_write(tps80031->regmap[sid], reg, val);
+}
+
+static inline int tps80031_writes(struct device *dev, int sid, int reg,
+               int len, uint8_t *val)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_read(struct device *dev, int sid,
+               int reg, uint8_t *val)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+       unsigned int ival;
+       int ret;
+
+       ret = regmap_read(tps80031->regmap[sid], reg, &ival);
+       if (ret < 0) {
+               dev_err(dev, "failed reading from reg 0x%02x\n", reg);
+               return ret;
+       }
+
+       *val = ival;
+       return ret;
+}
+
+static inline int tps80031_reads(struct device *dev, int sid,
+               int reg, int len, uint8_t *val)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_set_bits(struct device *dev, int sid,
+               int reg, uint8_t bit_mask)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_update_bits(tps80031->regmap[sid], reg,
+                               bit_mask, bit_mask);
+}
+
+static inline int tps80031_clr_bits(struct device *dev, int sid,
+               int reg, uint8_t bit_mask)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
+}
+
+static inline int tps80031_update(struct device *dev, int sid,
+               int reg, uint8_t val, uint8_t mask)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
+}
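+
+/*
+ * Example: enable backup-battery charging through the helpers above
+ * (a sketch; the slave id carrying BBSPOR_CFG is an assumption here,
+ * and "dev" must hold the struct tps80031 as drvdata):
+ *
+ *	tps80031_set_bits(dev, TPS80031_SLAVE_ID1, TPS80031_BBSPOR_CFG,
+ *			  TPS80031_BBSPOR_CHG_EN);
+ */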
+
+static inline unsigned long tps80031_get_chip_info(struct device *dev)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return tps80031->chip_info;
+}
+
+static inline int tps80031_get_pmu_version(struct device *dev)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return tps80031->es_version;
+}
+
+static inline int tps80031_irq_get_virq(struct device *dev, int irq)
+{
+       struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+       return regmap_irq_get_virq(tps80031->irq_data, irq);
+}
+
+extern int tps80031_ext_power_req_config(struct device *dev,
+               unsigned long ext_ctrl_flag, int preq_bit,
+               int state_reg_add, int trans_reg_add);
+#endif /*__LINUX_MFD_TPS80031_H */
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index a8eff4a..94ac944 100644
@@ -207,10 +207,12 @@ struct twl6040_platform_data {
 };
 
 struct regmap;
+struct regmap_irq_chip_data;
 
 struct twl6040 {
        struct device *dev;
        struct regmap *regmap;
+       struct regmap_irq_chip_data *irq_data;
        struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
        struct mutex mutex;
        struct mutex irq_mutex;
@@ -228,9 +230,8 @@ struct twl6040 {
        unsigned int mclk;
 
        unsigned int irq;
-       unsigned int irq_base;
-       u8 irq_masks_cur;
-       u8 irq_masks_cache;
+       unsigned int irq_ready;
+       unsigned int irq_th;
 };
 
 int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg);
@@ -245,8 +246,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
                    unsigned int freq_in, unsigned int freq_out);
 int twl6040_get_pll(struct twl6040 *twl6040);
 unsigned int twl6040_get_sysclk(struct twl6040 *twl6040);
-int twl6040_irq_init(struct twl6040 *twl6040);
-void twl6040_irq_exit(struct twl6040 *twl6040);
+
 /* Get the combined status of the vibra control register */
 int twl6040_get_vibralr_status(struct twl6040 *twl6040);
 
diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h
new file mode 100644 (file)
index 0000000..1934528
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ *  include/linux/mfd/viperboard.h
+ *
+ *  Nano River Technologies viperboard definitions
+ *
+ *  (C) 2012 by Lemonage GmbH
+ *  Author: Lars Poeschel <poeschel@lemonage.de>
+ *  All rights reserved.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __MFD_VIPERBOARD_H__
+#define __MFD_VIPERBOARD_H__
+
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#define VPRBRD_EP_OUT               0x02
+#define VPRBRD_EP_IN                0x86
+
+#define VPRBRD_I2C_MSG_LEN          512 /* max length of a msg on USB level */
+
+#define VPRBRD_I2C_FREQ_6MHZ        1                        /*   6 MBit/s */
+#define VPRBRD_I2C_FREQ_3MHZ        2                        /*   3 MBit/s */
+#define VPRBRD_I2C_FREQ_1MHZ        3                        /*   1 MBit/s */
+#define VPRBRD_I2C_FREQ_FAST        4                        /* 400 kbit/s */
+#define VPRBRD_I2C_FREQ_400KHZ      VPRBRD_I2C_FREQ_FAST
+#define VPRBRD_I2C_FREQ_200KHZ      5                        /* 200 kbit/s */
+#define VPRBRD_I2C_FREQ_STD         6                        /* 100 kbit/s */
+#define VPRBRD_I2C_FREQ_100KHZ      VPRBRD_I2C_FREQ_STD
+#define VPRBRD_I2C_FREQ_10KHZ       7                        /*  10 kbit/s */
+
+#define VPRBRD_I2C_CMD_WRITE        0x00
+#define VPRBRD_I2C_CMD_READ         0x01
+#define VPRBRD_I2C_CMD_ADDR         0x02
+
+#define VPRBRD_USB_TYPE_OUT        0x40
+#define VPRBRD_USB_TYPE_IN         0xc0
+#define VPRBRD_USB_TIMEOUT_MS       100
+#define VPRBRD_USB_REQUEST_I2C_FREQ 0xe6
+#define VPRBRD_USB_REQUEST_I2C      0xe9
+#define VPRBRD_USB_REQUEST_MAJOR    0xea
+#define VPRBRD_USB_REQUEST_MINOR    0xeb
+#define VPRBRD_USB_REQUEST_ADC      0xec
+#define VPRBRD_USB_REQUEST_GPIOA    0xed
+#define VPRBRD_USB_REQUEST_GPIOB    0xdd
+
+struct vprbrd_i2c_write_hdr {
+       u8 cmd;
+       u16 addr;
+       u8 len1;
+       u8 len2;
+       u8 last;
+       u8 chan;
+       u16 spi;
+} __packed;
+
+struct vprbrd_i2c_read_hdr {
+       u8 cmd;
+       u16 addr;
+       u8 len0;
+       u8 len1;
+       u8 len2;
+       u8 len3;
+       u8 len4;
+       u8 len5;
+       u16 tf1;                        /* transfer 1 length */
+       u16 tf2;                        /* transfer 2 length */
+} __packed;
+
+struct vprbrd_i2c_status {
+       u8 unknown[11];
+       u8 status;
+} __packed;
+
+struct vprbrd_i2c_write_msg {
+       struct vprbrd_i2c_write_hdr header;
+       u8 data[VPRBRD_I2C_MSG_LEN
+               - sizeof(struct vprbrd_i2c_write_hdr)];
+} __packed;
+
+struct vprbrd_i2c_read_msg {
+       struct vprbrd_i2c_read_hdr header;
+       u8 data[VPRBRD_I2C_MSG_LEN
+               - sizeof(struct vprbrd_i2c_read_hdr)];
+} __packed;
+
+struct vprbrd_i2c_addr_msg {
+       u8 cmd;
+       u8 addr;
+       u8 unknown1;
+       u16 len;
+       u8 unknown2;
+       u8 unknown3;
+} __packed;
+
+/* Structure to hold all device specific stuff */
+struct vprbrd {
+       struct usb_device *usb_dev; /* the usb device for this device */
+       struct mutex lock;
+       u8 buf[sizeof(struct vprbrd_i2c_write_msg)];
+       struct platform_device pdev;
+};
+
+#endif /* __MFD_VIPERBOARD_H__ */
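
Because both headers are __packed, the usable I2C payload per 512-byte USB
message falls out of their sizes directly (write header 1+2+1+1+1+1+2 = 9
bytes, read header 1+2+6+2+2 = 13 bytes); a small sketch of the arithmetic:

/* maximum payload bytes per USB message, derived from the header sizes */
static const size_t vprbrd_max_write_payload =
	VPRBRD_I2C_MSG_LEN - sizeof(struct vprbrd_i2c_write_hdr);  /* 503 */
static const size_t vprbrd_max_read_payload =
	VPRBRD_I2C_MSG_LEN - sizeof(struct vprbrd_i2c_read_hdr);   /* 499 */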
index 0b5865c..1e9f627 100644 (file)
@@ -23,6 +23,15 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 #define MIGRATEPAGE_BALLOON_SUCCESS    1 /* special ret code for balloon page
                                           * successful migration case.
                                           */
+enum migrate_reason {
+       MR_COMPACTION,
+       MR_MEMORY_FAILURE,
+       MR_MEMORY_HOTPLUG,
+       MR_SYSCALL,             /* also applies to cpusets */
+       MR_MEMPOLICY_MBIND,
+       MR_NUMA_MISPLACED,
+       MR_CMA
+};
 
 #ifdef CONFIG_MIGRATION
 
@@ -32,7 +41,7 @@ extern int migrate_page(struct address_space *,
                        struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
                        unsigned long private, bool offlining,
-                       enum migrate_mode mode);
+                       enum migrate_mode mode, int reason);
 extern int migrate_huge_page(struct page *, new_page_t x,
                        unsigned long private, bool offlining,
                        enum migrate_mode mode);
@@ -54,7 +63,7 @@ static inline void putback_lru_pages(struct list_head *l) {}
 static inline void putback_movable_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
                unsigned long private, bool offlining,
-               enum migrate_mode mode) { return -ENOSYS; }
+               enum migrate_mode mode, int reason) { return -ENOSYS; }
 static inline int migrate_huge_page(struct page *page, new_page_t x,
                unsigned long private, bool offlining,
                enum migrate_mode mode) { return -ENOSYS; }
@@ -83,4 +92,36 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #define fail_migrate_page NULL
 
 #endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_NUMA_BALANCING
+extern int migrate_misplaced_page(struct page *page, int node);
+extern bool migrate_ratelimited(int node);
+#else
+static inline int migrate_misplaced_page(struct page *page, int node)
+{
+       return -EAGAIN; /* can't migrate now */
+}
+static inline bool migrate_ratelimited(int node)
+{
+       return false;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+                       struct vm_area_struct *vma,
+                       pmd_t *pmd, pmd_t entry,
+                       unsigned long address,
+                       struct page *page, int node);
+#else
+static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+                       struct vm_area_struct *vma,
+                       pmd_t *pmd, pmd_t entry,
+                       unsigned long address,
+                       struct page *page, int node)
+{
+       return -EAGAIN;
+}
+#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
+
 #endif /* _LINUX_MIGRATE_H */
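
The !CONFIG_NUMA_BALANCING stubs return -EAGAIN ("can't migrate now"), so a
fault path can use one code shape regardless of config; a minimal sketch of
that caller contract (the helper name is hypothetical):

static void example_handle_numa_fault(struct page *page, int target_nid)
{
	if (migrate_ratelimited(target_nid))
		return;		/* too many recent migrations to this node */

	if (migrate_misplaced_page(page, target_nid) == -EAGAIN)
		return;		/* not migrated; keep using the page in place */
}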
index 4af4f0b..7f4f906 100644 (file)
@@ -693,6 +693,36 @@ static inline int page_to_nid(const struct page *page)
 }
 #endif
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+       return xchg(&page->_last_nid, nid);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+       return page->_last_nid;
+}
+static inline void reset_page_last_nid(struct page *page)
+{
+       page->_last_nid = -1;
+}
+#else
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+       return page_to_nid(page);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+       return page_to_nid(page);
+}
+
+static inline void reset_page_last_nid(struct page *page)
+{
+}
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
@@ -1078,6 +1108,9 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
 extern unsigned long do_mremap(unsigned long addr,
                               unsigned long old_len, unsigned long new_len,
                               unsigned long flags, unsigned long new_addr);
+extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+                             unsigned long end, pgprot_t newprot,
+                             int dirty_accountable, int prot_numa);
 extern int mprotect_fixup(struct vm_area_struct *vma,
                          struct vm_area_struct **pprev, unsigned long start,
                          unsigned long end, unsigned long newflags);
@@ -1579,6 +1612,11 @@ static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 #endif
 
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+                       unsigned long start, unsigned long end);
+#endif
+
 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
@@ -1600,6 +1638,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
 #define FOLL_MLOCK     0x40    /* mark page as mlocked */
 #define FOLL_SPLIT     0x80    /* don't return transhuge pages, split them */
 #define FOLL_HWPOISON  0x100   /* check page is hwpoisoned */
+#define FOLL_NUMA      0x200   /* force NUMA hinting page fault */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
                        void *data);
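
A sketch of how the _last_nid accessors are meant to be used: record the node
of the current fault and compare it with the previously recorded one. With
CONFIG_NUMA_BALANCING off, both accessors fall back to page_to_nid(). The
helper name is hypothetical:

static bool example_faulted_from_new_node(struct page *page)
{
	int this_nid = numa_node_id();

	/* returns the node recorded by the previous fault */
	return page_xchg_last_nid(page, this_nid) != this_nid;
}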
index 7ade273..7d9ebb7 100644 (file)
@@ -175,6 +175,10 @@ struct page {
         */
        void *shadow;
 #endif
+
+#ifdef CONFIG_NUMA_BALANCING
+       int _last_nid;
+#endif
 }
 /*
  * The struct page can be forced to be double word aligned so that atomic ops
@@ -411,9 +415,36 @@ struct mm_struct {
 #ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+       /*
+        * numa_next_scan is the next time when the PTEs will be marked
+        * pte_numa to gather statistics and migrate pages to new nodes
+        * if necessary
+        */
+       unsigned long numa_next_scan;
+
+       /* numa_next_reset is when the PTE scanner period will be reset */
+       unsigned long numa_next_reset;
+
+       /* Restart point for scanning and setting pte_numa */
+       unsigned long numa_scan_offset;
+
+       /* numa_scan_seq prevents two threads setting pte_numa */
+       int numa_scan_seq;
+
+       /*
+        * The first node a task was scheduled on. If a task later runs
+        * on a different node, the PTE scanner is started immediately.
+        */
+       int first_nid;
+#endif
        struct uprobes_state uprobes_state;
 };
 
+/* first nid will either be a valid NID or one of these values */
+#define NUMA_PTE_SCAN_INIT     -1
+#define NUMA_PTE_SCAN_ACTIVE   -2
+
 static inline void mm_init_cpumask(struct mm_struct *mm)
 {
 #ifdef CONFIG_CPUMASK_OFFSTACK
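
A sketch of the protocol the two sentinels imply, stated as an assumption
(the real policy lives in the scheduler): remember the first node lazily, and
flip first_nid to ACTIVE once the task is seen running somewhere else.

static bool example_should_scan_ptes(struct mm_struct *mm)
{
	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
		mm->first_nid = numa_node_id();	/* remember the first node */

	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
		if (numa_node_id() == mm->first_nid)
			return false;		/* still on the first node */
		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
	}
	return true;
}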
index cd55dad..4bec5be 100644 (file)
@@ -735,6 +735,19 @@ typedef struct pglist_data {
        struct task_struct *kswapd;     /* Protected by lock_memory_hotplug() */
        int kswapd_max_order;
        enum zone_type classzone_idx;
+#ifdef CONFIG_NUMA_BALANCING
+       /*
+        * Lock serializing the per destination node AutoNUMA memory
+        * migration rate limiting data.
+        */
+       spinlock_t numabalancing_migrate_lock;
+
+       /* Rate limiting time interval */
+       unsigned long numabalancing_migrate_next_window;
+
+       /* Number of pages migrated during the rate limiting time interval */
+       unsigned long numabalancing_migrate_nr_pages;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
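
A sketch of the rate-limit check these three fields support; the one-second
window and the 1024-page cap are placeholders, not the values used by
mm/migrate.c:

static bool example_numa_migrate_allowed(pg_data_t *pgdat,
					 unsigned long nr_pages)
{
	bool allowed;

	spin_lock(&pgdat->numabalancing_migrate_lock);
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		/* open a fresh rate-limiting window */
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies + HZ;
	}
	allowed = pgdat->numabalancing_migrate_nr_pages + nr_pages <= 1024;
	if (allowed)
		pgdat->numabalancing_migrate_nr_pages += nr_pages;
	spin_unlock(&pgdat->numabalancing_migrate_lock);

	return allowed;
}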
diff --git a/include/linux/platform_data/ti_am335x_adc.h b/include/linux/platform_data/ti_am335x_adc.h
new file mode 100644 (file)
index 0000000..e41d583
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __LINUX_TI_AM335X_ADC_H
+#define __LINUX_TI_AM335X_ADC_H
+
+/**
+ * struct adc_data - ADC input information
+ * @adc_channels:      Number of analog inputs
+ *                     available for ADC.
+ */
+
+struct adc_data {
+       unsigned int adc_channels;
+};
+
+#endif
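
A board file hands this structure to the parent MFD as platform data; a
minimal sketch (the channel count is illustrative):

static struct adc_data am335x_adc_pdata = {
	.adc_channels = 4,	/* e.g. AIN0-AIN3 wired up on the board */
};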
index bfe1f47..c20635c 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/memcontrol.h>
 
 /*
@@ -25,8 +25,8 @@
  * pointing to this anon_vma once its vma list is empty.
  */
 struct anon_vma {
-       struct anon_vma *root;  /* Root of this anon_vma tree */
-       struct mutex mutex;     /* Serialize access to vma list */
+       struct anon_vma *root;          /* Root of this anon_vma tree */
+       struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
@@ -64,7 +64,7 @@ struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;   /* locked by mmap_sem & page_table_lock */
-       struct rb_node rb;                      /* locked by anon_vma->mutex */
+       struct rb_node rb;                      /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
 #ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
@@ -108,26 +108,37 @@ static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
-               mutex_lock(&anon_vma->root->mutex);
+               down_write(&anon_vma->root->rwsem);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
-               mutex_unlock(&anon_vma->root->mutex);
+               up_write(&anon_vma->root->rwsem);
 }
 
-static inline void anon_vma_lock(struct anon_vma *anon_vma)
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
-       mutex_lock(&anon_vma->root->mutex);
+       down_write(&anon_vma->root->rwsem);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-       mutex_unlock(&anon_vma->root->mutex);
+       up_write(&anon_vma->root->rwsem);
 }
 
+static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
+{
+       down_read(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+{
+       up_read(&anon_vma->root->rwsem);
+}
+
+
 /*
  * anon_vma helper functions.
  */
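
The mutex-to-rwsem conversion above is what allows several rmap walkers to
proceed in parallel; a minimal sketch of the intended split (writers that
mutate the tree take the write side, walkers the read side):

static void example_walk_anon_vma(struct anon_vma *anon_vma)
{
	anon_vma_lock_read(anon_vma);	/* other walkers may run concurrently */
	/* ... iterate the anon_vma interval tree ... */
	anon_vma_unlock_read(anon_vma);
}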
@@ -220,8 +231,8 @@ int try_to_munlock(struct page *);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page);
-void page_unlock_anon_vma(struct anon_vma *anon_vma);
+struct anon_vma *page_lock_anon_vma_read(struct page *page);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
 /*
index 2c2f307..b089c92 100644 (file)
@@ -1527,6 +1527,14 @@ struct task_struct {
        short il_next;
        short pref_node_fork;
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+       int numa_scan_seq;
+       int numa_migrate_seq;
+       unsigned int numa_scan_period;
+       u64 node_stamp;                 /* migration stamp  */
+       struct callback_head numa_work;
+#endif /* CONFIG_NUMA_BALANCING */
+
        struct rcu_head rcu;
 
        /*
@@ -1601,6 +1609,18 @@ struct task_struct {
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
+#ifdef CONFIG_NUMA_BALANCING
+extern void task_numa_fault(int node, int pages, bool migrated);
+extern void set_numabalancing_state(bool enabled);
+#else
+static inline void task_numa_fault(int node, int pages, bool migrated)
+{
+}
+static inline void set_numabalancing_state(bool enabled)
+{
+}
+#endif
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -2030,6 +2050,13 @@ enum sched_tunable_scaling {
 };
 extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
 
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
+extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_settle_count;
+
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
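
task_numa_fault() is the hook fault paths use to feed the balancer; since the
!CONFIG_NUMA_BALANCING stub is an empty inline, call sites need no ifdefs. A
sketch of the call shape (names hypothetical):

static void example_account_numa_fault(struct page *page, int nr_pages,
				       bool migrated)
{
	/* compiles away entirely when NUMA balancing is disabled */
	task_numa_fault(page_to_nid(page), nr_pages, migrated);
}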
index 8d08b3e..071d62c 100644 (file)
@@ -34,21 +34,25 @@ enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
 };
-extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-                                   phys_addr_t phys, size_t size,
-                                   enum dma_data_direction dir);
 
-extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
+/* define the last possible byte of physical address space as a mapping error */
+#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
+
+extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                         dma_addr_t tbl_dma_addr,
+                                         phys_addr_t phys, size_t size,
+                                         enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev,
+                                    phys_addr_t tlb_addr,
                                     size_t size, enum dma_data_direction dir);
 
-extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
+extern void swiotlb_tbl_sync_single(struct device *hwdev,
+                                   phys_addr_t tlb_addr,
                                    size_t size, enum dma_data_direction dir,
                                    enum dma_sync_target target);
 
 /* Accessory functions. */
-extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-                          enum dma_data_direction dir);
-
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flags);
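
The reworked interface traffics in physical addresses, so failure is signalled
with the SWIOTLB_MAP_ERROR sentinel rather than a NULL pointer. A sketch of
the caller-side check, assuming the architecture provides phys_to_dma():

static dma_addr_t example_map(struct device *hwdev, dma_addr_t tbl_dma_addr,
			      phys_addr_t phys, size_t size,
			      enum dma_data_direction dir)
{
	phys_addr_t map_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr,
						      phys, size, dir);
	if (map_addr == SWIOTLB_MAP_ERROR)
		return 0;	/* placeholder for an arch error cookie */
	return phys_to_dma(hwdev, map_addr);
}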
index fe786f0..fce0a27 100644 (file)
@@ -38,8 +38,18 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
                KSWAPD_SKIP_CONGESTION_WAIT,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_NUMA_BALANCING
+               NUMA_PTE_UPDATES,
+               NUMA_HINT_FAULTS,
+               NUMA_HINT_FAULTS_LOCAL,
+               NUMA_PAGE_MIGRATE,
+#endif
+#ifdef CONFIG_MIGRATION
+               PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+#endif
 #ifdef CONFIG_COMPACTION
-               COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+               COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
+               COMPACTISOLATED,
                COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
 #endif
 #ifdef CONFIG_HUGETLB_PAGE
index 92a86b2..a13291f 100644 (file)
@@ -80,6 +80,14 @@ static inline void vm_events_fold_cpu(int cpu)
 
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
+#ifdef CONFIG_NUMA_BALANCING
+#define count_vm_numa_event(x)     count_vm_event(x)
+#define count_vm_numa_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_numa_event(x) do {} while (0)
+#define count_vm_numa_events(x, y) do {} while (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
 #define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)
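
The wrappers above let NUMA-balancing code bump its counters unconditionally;
with the option off they expand to empty statements. A sketch of typical call
sites using the new vm_event_item entries:

static void example_count_hinting_fault(unsigned long nr_pte_updates,
					bool local)
{
	count_vm_numa_events(NUMA_PTE_UPDATES, nr_pte_updates);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (local)
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
}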
index 2acd540..5079109 100644 (file)
@@ -9,6 +9,8 @@ struct se_subsystem_api {
        struct list_head sub_api_list;
 
        char name[16];
+       char inquiry_prod[16];
+       char inquiry_rev[4];
        struct module *owner;
 
        u8 transport_type;
@@ -16,46 +18,45 @@ struct se_subsystem_api {
        int (*attach_hba)(struct se_hba *, u32);
        void (*detach_hba)(struct se_hba *);
        int (*pmode_enable_hba)(struct se_hba *, unsigned long);
-       void *(*allocate_virtdevice)(struct se_hba *, const char *);
-       struct se_device *(*create_virtdevice)(struct se_hba *,
-                               struct se_subsystem_dev *, void *);
-       void (*free_device)(void *);
+
+       struct se_device *(*alloc_device)(struct se_hba *, const char *);
+       int (*configure_device)(struct se_device *);
+       void (*free_device)(struct se_device *device);
+
+       ssize_t (*set_configfs_dev_params)(struct se_device *,
+                                          const char *, ssize_t);
+       ssize_t (*show_configfs_dev_params)(struct se_device *, char *);
+
        void (*transport_complete)(struct se_cmd *cmd,
                                   struct scatterlist *,
                                   unsigned char *);
 
-       int (*parse_cdb)(struct se_cmd *cmd);
-       ssize_t (*check_configfs_dev_params)(struct se_hba *,
-                       struct se_subsystem_dev *);
-       ssize_t (*set_configfs_dev_params)(struct se_hba *,
-                       struct se_subsystem_dev *, const char *, ssize_t);
-       ssize_t (*show_configfs_dev_params)(struct se_hba *,
-                       struct se_subsystem_dev *, char *);
-       u32 (*get_device_rev)(struct se_device *);
+       sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
        u32 (*get_device_type)(struct se_device *);
        sector_t (*get_blocks)(struct se_device *);
        unsigned char *(*get_sense_buffer)(struct se_cmd *);
 };
 
-struct spc_ops {
-       int (*execute_rw)(struct se_cmd *cmd);
-       int (*execute_sync_cache)(struct se_cmd *cmd);
-       int (*execute_write_same)(struct se_cmd *cmd);
-       int (*execute_unmap)(struct se_cmd *cmd);
+struct sbc_ops {
+       sense_reason_t (*execute_rw)(struct se_cmd *cmd);
+       sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
+       sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
+       sense_reason_t (*execute_write_same_unmap)(struct se_cmd *cmd);
+       sense_reason_t (*execute_unmap)(struct se_cmd *cmd);
 };
 
 int    transport_subsystem_register(struct se_subsystem_api *);
 void   transport_subsystem_release(struct se_subsystem_api *);
 
-struct se_device *transport_add_device_to_core_hba(struct se_hba *,
-               struct se_subsystem_api *, struct se_subsystem_dev *, u32,
-               void *, struct se_dev_limits *, const char *, const char *);
-
 void   target_complete_cmd(struct se_cmd *, u8);
 
-int    sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops);
-int    spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
-int    spc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
+sector_t       spc_get_write_same_sectors(struct se_cmd *cmd);
+
+sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
+u32    sbc_get_device_rev(struct se_device *dev);
+u32    sbc_get_device_type(struct se_device *dev);
 
 void   transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int    transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
index 5be8937..7cae236 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-/*
- * struct se_subsystem_dev->su_dev_flags
-*/
-#define SDF_FIRMWARE_VPD_UNIT_SERIAL           0x00000001
-#define SDF_EMULATED_VPD_UNIT_SERIAL           0x00000002
-#define SDF_USING_UDEV_PATH                    0x00000004
-#define SDF_USING_ALIAS                                0x00000008
-
-/*
- * struct se_device->dev_flags
- */
-#define DF_SPC2_RESERVATIONS                   0x00000001
-#define DF_SPC2_RESERVATIONS_WITH_ISID         0x00000002
-
 /* struct se_dev_attrib sanity values */
 /* Default max_unmap_lba_count */
 #define DA_MAX_UNMAP_LBA_COUNT                 0
@@ -85,6 +71,8 @@
 #define DA_UNMAP_GRANULARITY_DEFAULT           0
 /* Default unmap_granularity_alignment */
 #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
+/* Default max_write_same_len, disabled by default */
+#define DA_MAX_WRITE_SAME_LEN                  0
 /* Default max transfer length */
 #define DA_FABRIC_MAX_SECTORS                  8192
 /* Emulation for Direct Page Out */
 #define DA_EMULATE_TPWS                                0
 /* No Emulation for PSCSI by default */
-#define DA_EMULATE_RESERVATIONS                        0
-/* No Emulation for PSCSI by default */
 #define DA_EMULATE_ALUA                                0
 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
 #define DA_ENFORCE_PR_ISIDS                    1
@@ -160,8 +146,6 @@ enum se_cmd_flags_table {
        SCF_EMULATED_TASK_SENSE         = 0x00000004,
        SCF_SCSI_DATA_CDB               = 0x00000008,
        SCF_SCSI_TMR_CDB                = 0x00000010,
-       SCF_SCSI_CDB_EXCEPTION          = 0x00000020,
-       SCF_SCSI_RESERVATION_CONFLICT   = 0x00000040,
        SCF_FUA                         = 0x00000080,
        SCF_SE_LUN_CMD                  = 0x00000100,
        SCF_BIDI                        = 0x00000400,
@@ -182,38 +166,33 @@ enum transport_lunflags_table {
        TRANSPORT_LUNFLAGS_READ_WRITE           = 0x04,
 };
 
-/* struct se_device->dev_status */
-enum transport_device_status_table {
-       TRANSPORT_DEVICE_ACTIVATED              = 0x01,
-       TRANSPORT_DEVICE_DEACTIVATED            = 0x02,
-       TRANSPORT_DEVICE_QUEUE_FULL             = 0x04,
-       TRANSPORT_DEVICE_SHUTDOWN               = 0x08,
-       TRANSPORT_DEVICE_OFFLINE_ACTIVATED      = 0x10,
-       TRANSPORT_DEVICE_OFFLINE_DEACTIVATED    = 0x20,
-};
-
 /*
- * Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
+ * Used by transport_send_check_condition_and_sense()
  * to signal which ASC/ASCQ sense payload should be built.
  */
+typedef unsigned __bitwise__ sense_reason_t;
+
 enum tcm_sense_reason_table {
-       TCM_NON_EXISTENT_LUN                    = 0x01,
-       TCM_UNSUPPORTED_SCSI_OPCODE             = 0x02,
-       TCM_INCORRECT_AMOUNT_OF_DATA            = 0x03,
-       TCM_UNEXPECTED_UNSOLICITED_DATA         = 0x04,
-       TCM_SERVICE_CRC_ERROR                   = 0x05,
-       TCM_SNACK_REJECTED                      = 0x06,
-       TCM_SECTOR_COUNT_TOO_MANY               = 0x07,
-       TCM_INVALID_CDB_FIELD                   = 0x08,
-       TCM_INVALID_PARAMETER_LIST              = 0x09,
-       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE  = 0x0a,
-       TCM_UNKNOWN_MODE_PAGE                   = 0x0b,
-       TCM_WRITE_PROTECTED                     = 0x0c,
-       TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
-       TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
-       TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
-       TCM_RESERVATION_CONFLICT                = 0x10,
-       TCM_ADDRESS_OUT_OF_RANGE                = 0x11,
+#define R(x)   (__force sense_reason_t )(x)
+       TCM_NON_EXISTENT_LUN                    = R(0x01),
+       TCM_UNSUPPORTED_SCSI_OPCODE             = R(0x02),
+       TCM_INCORRECT_AMOUNT_OF_DATA            = R(0x03),
+       TCM_UNEXPECTED_UNSOLICITED_DATA         = R(0x04),
+       TCM_SERVICE_CRC_ERROR                   = R(0x05),
+       TCM_SNACK_REJECTED                      = R(0x06),
+       TCM_SECTOR_COUNT_TOO_MANY               = R(0x07),
+       TCM_INVALID_CDB_FIELD                   = R(0x08),
+       TCM_INVALID_PARAMETER_LIST              = R(0x09),
+       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE  = R(0x0a),
+       TCM_UNKNOWN_MODE_PAGE                   = R(0x0b),
+       TCM_WRITE_PROTECTED                     = R(0x0c),
+       TCM_CHECK_CONDITION_ABORT_CMD           = R(0x0d),
+       TCM_CHECK_CONDITION_UNIT_ATTENTION      = R(0x0e),
+       TCM_CHECK_CONDITION_NOT_READY           = R(0x0f),
+       TCM_RESERVATION_CONFLICT                = R(0x10),
+       TCM_ADDRESS_OUT_OF_RANGE                = R(0x11),
+       TCM_OUT_OF_RESOURCES                    = R(0x12),
+#undef R
 };
 
 enum target_sc_flags_table {
@@ -246,30 +225,6 @@ enum tcm_tmrsp_table {
        TMR_FUNCTION_REJECTED           = 255,
 };
 
-struct se_obj {
-       atomic_t obj_access_count;
-};
-
-/*
- * Used by TCM Core internally to signal if ALUA emulation is enabled or
- * disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
-       SPC_ALUA_PASSTHROUGH,
-       SPC2_ALUA_DISABLED,
-       SPC3_ALUA_EMULATED
-} t10_alua_index_t;
-
-/*
- * Used by TCM Core internally to signal if SAM Task Attribute emulation
- * is enabled or disabled, or running in with TCM/pSCSI passthrough mode
- */
-typedef enum {
-       SAM_TASK_ATTR_PASSTHROUGH,
-       SAM_TASK_ATTR_UNTAGGED,
-       SAM_TASK_ATTR_EMULATED
-} t10_task_attr_index_t;
-
 /*
  * Used for target SCSI statistics
  */
@@ -283,17 +238,15 @@ typedef enum {
 struct se_cmd;
 
 struct t10_alua {
-       t10_alua_index_t alua_type;
        /* ALUA Target Port Group ID */
        u16     alua_tg_pt_gps_counter;
        u32     alua_tg_pt_gps_count;
        spinlock_t tg_pt_gps_lock;
-       struct se_subsystem_dev *t10_sub_dev;
+       struct se_device *t10_dev;
        /* Used for default ALUA Target Port Group */
        struct t10_alua_tg_pt_gp *default_tg_pt_gp;
        /* Used for default ALUA Target Port Group ConfigFS group */
        struct config_group alua_tg_pt_gps_group;
-       int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
        struct list_head tg_pt_gps_list;
 };
 
@@ -335,7 +288,7 @@ struct t10_alua_tg_pt_gp {
        atomic_t tg_pt_gp_ref_cnt;
        spinlock_t tg_pt_gp_lock;
        struct mutex tg_pt_gp_md_mutex;
-       struct se_subsystem_dev *tg_pt_gp_su_dev;
+       struct se_device *tg_pt_gp_dev;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_mem_list;
@@ -366,23 +319,11 @@ struct t10_wwn {
        char revision[4];
        char unit_serial[INQUIRY_VPD_SERIAL_LEN];
        spinlock_t t10_vpd_lock;
-       struct se_subsystem_dev *t10_sub_dev;
+       struct se_device *t10_dev;
        struct config_group t10_wwn_group;
        struct list_head t10_vpd_list;
 };
 
-
-/*
- * Used by TCM Core internally to signal if >= SPC-3 persistent reservations
- * emulation is enabled or disabled, or running in with TCM/pSCSI passthrough
- * mode
- */
-typedef enum {
-       SPC_PASSTHROUGH,
-       SPC2_RESERVATIONS,
-       SPC3_PERSISTENT_RESERVATIONS
-} t10_reservations_index_t;
-
 struct t10_pr_registration {
        /* Used for fabrics that contain WWN+ISID */
 #define PR_REG_ISID_LEN                                16
@@ -424,18 +365,6 @@ struct t10_pr_registration {
        struct list_head pr_reg_atp_mem_list;
 };
 
-/*
- * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
- * SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
- * core_setup_reservations()
- */
-struct t10_reservation_ops {
-       int (*t10_reservation_check)(struct se_cmd *, u32 *);
-       int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
-       int (*t10_pr_register)(struct se_cmd *);
-       int (*t10_pr_clear)(struct se_cmd *);
-};
-
 struct t10_reservation {
        /* Reservation effects all target ports */
        int pr_all_tg_pt;
@@ -446,7 +375,6 @@ struct t10_reservation {
 #define PR_APTPL_BUF_LEN                       8192
        u32 pr_aptpl_buf_len;
        u32 pr_generation;
-       t10_reservations_index_t res_type;
        spinlock_t registration_lock;
        spinlock_t aptpl_reg_lock;
        /*
@@ -462,7 +390,6 @@ struct t10_reservation {
        struct se_node_acl *pr_res_holder;
        struct list_head registration_list;
        struct list_head aptpl_reg_list;
-       struct t10_reservation_ops pr_ops;
 };
 
 struct se_tmr_req {
@@ -485,7 +412,6 @@ struct se_cmd {
        u8                      scsi_status;
        u8                      scsi_asc;
        u8                      scsi_ascq;
-       u8                      scsi_sense_reason;
        u16                     scsi_sense_length;
        /* Delay for ALUA Active/NonOptimized state access in milliseconds */
        int                     alua_nonop_delay;
@@ -523,7 +449,7 @@ struct se_cmd {
        struct completion       cmd_wait_comp;
        struct kref             cmd_kref;
        struct target_core_fabric_ops *se_tfo;
-       int (*execute_cmd)(struct se_cmd *);
+       sense_reason_t          (*execute_cmd)(struct se_cmd *);
        void (*transport_complete_callback)(struct se_cmd *);
 
        unsigned char           *t_task_cdb;
@@ -581,6 +507,8 @@ struct se_node_acl {
        bool                    acl_stop:1;
        u32                     queue_depth;
        u32                     acl_index;
+#define MAX_ACL_TAG_SIZE 64
+       char                    acl_tag[MAX_ACL_TAG_SIZE];
        u64                     num_cmds;
        u64                     read_bytes;
        u64                     write_bytes;
@@ -662,15 +590,6 @@ struct se_dev_entry {
        struct list_head        ua_list;
 };
 
-struct se_dev_limits {
-       /* Max supported HW queue depth */
-       u32             hw_queue_depth;
-       /* Max supported virtual queue depth */
-       u32             queue_depth;
-       /* From include/linux/blkdev.h for the other HW/SW limits. */
-       struct queue_limits limits;
-};
-
 struct se_dev_attrib {
        int             emulate_dpo;
        int             emulate_fua_write;
@@ -680,8 +599,6 @@ struct se_dev_attrib {
        int             emulate_tas;
        int             emulate_tpu;
        int             emulate_tpws;
-       int             emulate_reservations;
-       int             emulate_alua;
        int             enforce_pr_isids;
        int             is_nonrot;
        int             emulate_rest_reord;
@@ -696,7 +613,8 @@ struct se_dev_attrib {
        u32             max_unmap_block_desc_count;
        u32             unmap_granularity;
        u32             unmap_granularity_alignment;
-       struct se_subsystem_dev *da_sub_dev;
+       u32             max_write_same_len;
+       struct se_device *da_dev;
        struct config_group da_group;
 };
 
@@ -707,48 +625,25 @@ struct se_dev_stat_grps {
        struct config_group scsi_lu_group;
 };
 
-struct se_subsystem_dev {
-/* Used for struct se_subsystem_dev-->se_dev_alias, must be less than PAGE_SIZE */
-#define SE_DEV_ALIAS_LEN               512
-       unsigned char   se_dev_alias[SE_DEV_ALIAS_LEN];
-/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
-#define SE_UDEV_PATH_LEN               512
-       unsigned char   se_dev_udev_path[SE_UDEV_PATH_LEN];
-       u32             su_dev_flags;
-       struct se_hba *se_dev_hba;
-       struct se_device *se_dev_ptr;
-       struct se_dev_attrib se_dev_attrib;
-       /* T10 Asymmetric Logical Unit Assignment for Target Ports */
-       struct t10_alua t10_alua;
-       /* T10 Inquiry and VPD WWN Information */
-       struct t10_wwn  t10_wwn;
-       /* T10 SPC-2 + SPC-3 Reservations */
-       struct t10_reservation t10_pr;
-       spinlock_t      se_dev_lock;
-       void            *se_dev_su_ptr;
-       struct config_group se_dev_group;
-       /* For T10 Reservations */
-       struct config_group se_dev_pr_group;
-       /* For target_core_stat.c groups */
-       struct se_dev_stat_grps dev_stat_grps;
-};
-
 struct se_device {
+#define SE_DEV_LINK_MAGIC                      0xfeeddeef
+       u32                     dev_link_magic;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
        u32                     dev_cur_ordered_id;
        u32                     dev_flags;
+#define DF_CONFIGURED                          0x00000001
+#define DF_FIRMWARE_VPD_UNIT_SERIAL            0x00000002
+#define DF_EMULATED_VPD_UNIT_SERIAL            0x00000004
+#define DF_USING_UDEV_PATH                     0x00000008
+#define DF_USING_ALIAS                         0x00000010
        u32                     dev_port_count;
-       /* See transport_device_status_table */
-       u32                     dev_status;
        /* Physical device queue depth */
        u32                     queue_depth;
        /* Used for SPC-2 reservations enforce of ISIDs */
        u64                     dev_res_bin_isid;
-       t10_task_attr_index_t   dev_task_attr_type;
        /* Pointer to transport specific device structure */
-       void                    *dev_ptr;
        u32                     dev_index;
        u64                     creation_time;
        u32                     num_resets;
@@ -761,13 +656,13 @@ struct se_device {
        atomic_t                dev_ordered_id;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
-       struct se_obj           dev_obj;
-       struct se_obj           dev_access_obj;
-       struct se_obj           dev_export_obj;
+       int                     export_count;
        spinlock_t              delayed_cmd_lock;
        spinlock_t              execute_task_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_status_lock;
+       unsigned int            dev_reservation_flags;
+#define DRF_SPC2_RESERVATIONS                  0x00000001
+#define DRF_SPC2_RESERVATIONS_WITH_ISID                0x00000002
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -786,7 +681,20 @@ struct se_device {
        struct list_head        qf_cmd_list;
        /* Pointer to associated SE HBA */
        struct se_hba           *se_hba;
-       struct se_subsystem_dev *se_sub_dev;
+       /* T10 Inquiry and VPD WWN Information */
+       struct t10_wwn          t10_wwn;
+       /* T10 Asymmetric Logical Unit Assignment for Target Ports */
+       struct t10_alua         t10_alua;
+       /* T10 SPC-2 + SPC-3 Reservations */
+       struct t10_reservation  t10_pr;
+       struct se_dev_attrib    dev_attrib;
+       struct config_group     dev_group;
+       struct config_group     dev_pr_group;
+       struct se_dev_stat_grps dev_stat_grps;
+#define SE_DEV_ALIAS_LEN 512           /* must be less than PAGE_SIZE */
+       unsigned char           dev_alias[SE_DEV_ALIAS_LEN];
+#define SE_UDEV_PATH_LEN 512           /* must be less than PAGE_SIZE */
+       unsigned char           udev_path[SE_UDEV_PATH_LEN];
        /* Pointer to template of function pointers for transport */
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
@@ -803,8 +711,6 @@ struct se_hba {
        u32                     hba_index;
        /* Pointer to transport specific host structure. */
        void                    *hba_ptr;
-       /* Linked list for struct se_device */
-       struct list_head        hba_dev_list;
        struct list_head        hba_node;
        spinlock_t              device_lock;
        struct config_group     hba_group;
@@ -820,6 +726,8 @@ struct se_port_stat_grps {
 };
 
 struct se_lun {
+#define SE_LUN_LINK_MAGIC                      0xffff7771
+       u32                     lun_link_magic;
        /* See transport_lun_status_table */
        enum transport_lun_status_table lun_status;
        u32                     lun_access;
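
Because sense_reason_t is __bitwise, sparse flags any path that leaks a plain
errno into the sense-code channel. A sketch of the shape a backend parse
routine now takes (the opcode test is illustrative only):

static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
{
	if (cmd->t_task_cdb[0] == 0xff)		/* illustrative reject */
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	return 0;	/* 0 is the one plain integer sparse permits */
}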
index 81ddb4a..aaa1ee6 100644 (file)
@@ -98,8 +98,8 @@ void  transport_deregister_session(struct se_session *);
 
 void   transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
                struct se_session *, u32, int, int, unsigned char *);
-int    transport_lookup_cmd_lun(struct se_cmd *, u32);
-int    target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u32);
+sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
 int    target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
                unsigned char *, unsigned char *, u32, u32, int, int, int,
                struct scatterlist *, u32, struct scatterlist *, u32);
@@ -110,9 +110,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                void *fabric_tmr_ptr, unsigned char tm_type,
                gfp_t, unsigned int, int);
 int    transport_handle_cdb_direct(struct se_cmd *);
-int    transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
-               struct scatterlist *, u32, struct scatterlist *, u32);
-int    transport_generic_new_cmd(struct se_cmd *);
+sense_reason_t transport_generic_new_cmd(struct se_cmd *);
 
 void   target_execute_cmd(struct se_cmd *cmd);
 
@@ -120,7 +118,8 @@ void        transport_generic_free_cmd(struct se_cmd *, int);
 
 bool   transport_wait_for_tasks(struct se_cmd *);
 int    transport_check_aborted_status(struct se_cmd *, int);
-int    transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+int    transport_send_check_condition_and_sense(struct se_cmd *,
+               sense_reason_t, int);
 
 int    target_put_sess_cmd(struct se_session *, struct se_cmd *);
 void   target_sess_cmd_list_set_waiting(struct se_session *);
@@ -131,7 +130,7 @@ int core_alua_check_nonop_delay(struct se_cmd *);
 int    core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void   core_tmr_release_req(struct se_tmr_req *);
 int    transport_generic_handle_tmr(struct se_cmd *);
-void   transport_generic_request_failure(struct se_cmd *);
+void   transport_generic_request_failure(struct se_cmd *, sense_reason_t);
 int    transport_lookup_tmr_lun(struct se_cmd *, u32);
 
 struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
@@ -143,6 +142,8 @@ int core_tpg_del_initiator_node_acl(struct se_portal_group *,
                struct se_node_acl *, int);
 int    core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
                unsigned char *, u32, int);
+int    core_tpg_set_initiator_node_tag(struct se_portal_group *,
+               struct se_node_acl *, const char *);
 int    core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
                struct se_portal_group *, void *, int);
 int    core_tpg_deregister(struct se_portal_group *);
index d49b285..f6372b0 100644 (file)
@@ -15,6 +15,7 @@ struct ext4_inode_info;
 struct mpage_da_data;
 struct ext4_map_blocks;
 struct ext4_extent;
+struct extent_status;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
@@ -1519,10 +1520,9 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
 );
 
 DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
-       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                ext4_fsblk_t pblk, unsigned int len, int ret),
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
 
-       TP_ARGS(inode, lblk, pblk, len, ret),
+       TP_ARGS(inode, map, ret),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
@@ -1530,37 +1530,37 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
                __field(        ext4_fsblk_t,   pblk            )
                __field(        ext4_lblk_t,    lblk            )
                __field(        unsigned int,   len             )
+               __field(        unsigned int,   flags           )
                __field(        int,            ret             )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->pblk   = pblk;
-               __entry->lblk   = lblk;
-               __entry->len    = len;
+               __entry->pblk   = map->m_pblk;
+               __entry->lblk   = map->m_lblk;
+               __entry->len    = map->m_len;
+               __entry->flags  = map->m_flags;
                __entry->ret    = ret;
        ),
 
-       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u flags %x ret %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->lblk, __entry->pblk,
-                 __entry->len, __entry->ret)
+                 __entry->len, __entry->flags, __entry->ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
-       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                ext4_fsblk_t pblk, unsigned len, int ret),
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
 
-       TP_ARGS(inode, lblk, pblk, len, ret)
+       TP_ARGS(inode, map, ret)
 );
 
 DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
-       TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
-                ext4_fsblk_t pblk, unsigned len, int ret),
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int ret),
 
-       TP_ARGS(inode, lblk, pblk, len, ret)
+       TP_ARGS(inode, map, ret)
 );
 
 TRACE_EVENT(ext4_ext_load_extent,
@@ -1680,10 +1680,10 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
 );
 
 TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
-       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
                 unsigned int allocated, ext4_fsblk_t newblock),
 
-       TP_ARGS(inode, map, allocated, newblock),
+       TP_ARGS(inode, map, flags, allocated, newblock),
 
        TP_STRUCT__entry(
                __field(        dev_t,          dev             )
@@ -1699,7 +1699,7 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
        TP_fast_assign(
                __entry->dev            = inode->i_sb->s_dev;
                __entry->ino            = inode->i_ino;
-               __entry->flags          = map->m_flags;
+               __entry->flags          = flags;
                __entry->lblk           = map->m_lblk;
                __entry->pblk           = map->m_pblk;
                __entry->len            = map->m_len;
@@ -1707,7 +1707,7 @@ TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
                __entry->newblk         = newblock;
        ),
 
-       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %"
                  "allocated %d newblock %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
@@ -2055,6 +2055,106 @@ TRACE_EVENT(ext4_ext_remove_space_done,
                  (unsigned short) __entry->eh_entries)
 );
 
+TRACE_EVENT(ext4_es_insert_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+
+       TP_ARGS(inode, start, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, start                   )
+               __field(        loff_t, len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = start;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->start, __entry->len)
+);
+
+TRACE_EVENT(ext4_es_remove_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, ext4_lblk_t len),
+
+       TP_ARGS(inode, start, len),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,  dev                     )
+               __field(        ino_t,  ino                     )
+               __field(        loff_t, start                   )
+               __field(        loff_t, len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = start;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu es [%lld/%lld)",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->start, __entry->len)
+);
+
+TRACE_EVENT(ext4_es_find_extent_enter,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start),
+
+       TP_ARGS(inode, start),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    start           )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = start;
+       ),
+
+       TP_printk("dev %d,%d ino %lu start %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino, __entry->start)
+);
+
+TRACE_EVENT(ext4_es_find_extent_exit,
+       TP_PROTO(struct inode *inode, struct extent_status *es,
+                ext4_lblk_t ret),
+
+       TP_ARGS(inode, es, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev             )
+               __field(        ino_t,          ino             )
+               __field(        ext4_lblk_t,    start           )
+               __field(        ext4_lblk_t,    len             )
+               __field(        ext4_lblk_t,    ret             )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = es->start;
+               __entry->len    = es->len;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu es [%u/%u) ret %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->start, __entry->len, __entry->ret)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */
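
The reworked map_blocks exit class takes the ext4_map_blocks structure
directly, so call sites no longer unpack lblk/pblk/len by hand; a sketch of
the new shape:

static void example_trace_map_exit(struct inode *inode,
				   struct ext4_map_blocks *map, int ret)
{
	/* replaces trace_ext4_ext_map_blocks_exit(inode, lblk, pblk, len, ret) */
	trace_ext4_ext_map_blocks_exit(inode, map, ret);
}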
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
new file mode 100644 (file)
index 0000000..ec2a6cc
--- /dev/null
@@ -0,0 +1,52 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM migrate
+
+#if !defined(_TRACE_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MIGRATE_H
+
+#define MIGRATE_MODE                                           \
+       {MIGRATE_ASYNC,         "MIGRATE_ASYNC"},               \
+       {MIGRATE_SYNC_LIGHT,    "MIGRATE_SYNC_LIGHT"},          \
+       {MIGRATE_SYNC,          "MIGRATE_SYNC"}
+
+#define MIGRATE_REASON                                         \
+       {MR_COMPACTION,         "compaction"},                  \
+       {MR_MEMORY_FAILURE,     "memory_failure"},              \
+       {MR_MEMORY_HOTPLUG,     "memory_hotplug"},              \
+       {MR_SYSCALL,            "syscall_or_cpuset"},           \
+       {MR_MEMPOLICY_MBIND,    "mempolicy_mbind"},             \
+       {MR_NUMA_MISPLACED,     "numa_misplaced"},              \
+       {MR_CMA,                "cma"}
+
+TRACE_EVENT(mm_migrate_pages,
+
+       TP_PROTO(unsigned long succeeded, unsigned long failed,
+                enum migrate_mode mode, int reason),
+
+       TP_ARGS(succeeded, failed, mode, reason),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,          succeeded)
+               __field(        unsigned long,          failed)
+               __field(        enum migrate_mode,      mode)
+               __field(        int,                    reason)
+       ),
+
+       TP_fast_assign(
+               __entry->succeeded      = succeeded;
+               __entry->failed         = failed;
+               __entry->mode           = mode;
+               __entry->reason         = reason;
+       ),
+
+       TP_printk("nr_succeeded=%lu nr_failed=%lu mode=%s reason=%s",
+               __entry->succeeded,
+               __entry->failed,
+               __print_symbolic(__entry->mode, MIGRATE_MODE),
+               __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
+#endif /* _TRACE_MIGRATE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
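
A sketch of the producer side: migrate_pages() fires the tracepoint once per
batch, and the symbolic tables above turn the enum values into the strings
seen in trace output.

static void example_trace_migrate_batch(unsigned long nr_succeeded,
					unsigned long nr_failed,
					enum migrate_mode mode, int reason)
{
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
}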
index 1e3481e..8d1e2bb 100644 (file)
@@ -778,6 +778,7 @@ struct drm_event_vblank {
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
index c0494d5..e7f52c3 100644 (file)
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
        __u32   data;
 };
 
+enum drm_exynos_g2d_buf_type {
+       G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
        G2D_EVENT_NOT,
        G2D_EVENT_NONSTOP,
        G2D_EVENT_STOP,         /* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+       unsigned long userptr;
+       unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
        __u64                                   cmd;
-       __u64                                   cmd_gem;
+       __u64                                   cmd_buf;
        __u32                                   cmd_nr;
-       __u32                                   cmd_gem_nr;
+       __u32                                   cmd_buf_nr;
 
        /* for g2d event */
        __u64                                   event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
        __u64                                   async;
 };
 
+enum drm_exynos_ops_id {
+       EXYNOS_DRM_OPS_SRC,
+       EXYNOS_DRM_OPS_DST,
+       EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+       __u32   hsize;
+       __u32   vsize;
+};
+
+struct drm_exynos_pos {
+       __u32   x;
+       __u32   y;
+       __u32   w;
+       __u32   h;
+};
+
+enum drm_exynos_flip {
+       EXYNOS_DRM_FLIP_NONE = (0 << 0),
+       EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+       EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+       EXYNOS_DRM_DEGREE_0,
+       EXYNOS_DRM_DEGREE_90,
+       EXYNOS_DRM_DEGREE_180,
+       EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+       EXYNOS_DRM_PLANAR_Y,
+       EXYNOS_DRM_PLANAR_CB,
+       EXYNOS_DRM_PLANAR_CR,
+       EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of ipp driver.
+ * @count: count of ipp driver.
+ * @writeback: flag of writeback supporting.
+ * @flip: flag of flip supporting.
+ * @degree: flag of degree information.
+ * @csc: flag of csc supporting.
+ * @crop: flag of crop supporting.
+ * @scale: flag of scale supporting.
+ * @refresh_min: min hz of refresh.
+ * @refresh_max: max hz of refresh.
+ * @crop_min: crop min resolution.
+ * @crop_max: crop max resolution.
+ * @scale_min: scale min resolution.
+ * @scale_max: scale max resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+       __u32   version;
+       __u32   ipp_id;
+       __u32   count;
+       __u32   writeback;
+       __u32   flip;
+       __u32   degree;
+       __u32   csc;
+       __u32   crop;
+       __u32   scale;
+       __u32   refresh_min;
+       __u32   refresh_max;
+       __u32   reserved;
+       struct drm_exynos_sz    crop_min;
+       struct drm_exynos_sz    crop_max;
+       struct drm_exynos_sz    scale_min;
+       struct drm_exynos_sz    scale_max;
+};
+
+/**
+ * A structure for ipp config.
+ *
+ * @ops_id: property of operation directions.
+ * @flip: property of mirror, flip.
+ * @degree: property of rotation degree.
+ * @fmt: property of image format.
+ * @sz: property of image size.
+ * @pos: property of image position(src-cropped,dst-scaler).
+ */
+struct drm_exynos_ipp_config {
+       enum drm_exynos_ops_id ops_id;
+       enum drm_exynos_flip    flip;
+       enum drm_exynos_degree  degree;
+       __u32   fmt;
+       struct drm_exynos_sz    sz;
+       struct drm_exynos_pos   pos;
+};
+
+enum drm_exynos_ipp_cmd {
+       IPP_CMD_NONE,
+       IPP_CMD_M2M,
+       IPP_CMD_WB,
+       IPP_CMD_OUTPUT,
+       IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source, destination config.
+ * @cmd: definition of command.
+ * @ipp_id: id of ipp driver.
+ * @prop_id: id of property.
+ * @refresh_rate: refresh rate.
+ */
+struct drm_exynos_ipp_property {
+       struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
+       enum drm_exynos_ipp_cmd cmd;
+       __u32   ipp_id;
+       __u32   prop_id;
+       __u32   refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+       IPP_BUF_ENQUEUE,
+       IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation direction.
+ * @buf_type: type of buffer operation (enqueue/dequeue).
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @handle: per-plane (Y, Cb, Cr) buffer handles.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+       enum drm_exynos_ops_id  ops_id;
+       enum drm_exynos_ipp_buf_type    buf_type;
+       __u32   prop_id;
+       __u32   buf_id;
+       __u32   handle[EXYNOS_DRM_PLANAR_MAX];
+       __u32   reserved;
+       __u64   user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+       IPP_CTRL_PLAY,
+       IPP_CTRL_STOP,
+       IPP_CTRL_PAUSE,
+       IPP_CTRL_RESUME,
+       IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of property.
+ * @ctrl: definition of control.
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+       __u32   prop_id;
+       enum drm_exynos_ipp_ctrl        ctrl;
+};
+
 #define DRM_EXYNOS_GEM_CREATE          0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET      0x01
 #define DRM_EXYNOS_GEM_MMAP            0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_SET_CMDLIST     0x21
 #define DRM_EXYNOS_G2D_EXEC            0x22
 
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY    0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY    0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF       0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL        0x33
+
 #define DRM_IOCTL_EXYNOS_GEM_CREATE            DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC              DRM_IOWR(DRM_COMMAND_BASE + \
                DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY      DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY      DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL          DRM_IOWR(DRM_COMMAND_BASE + \
+               DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT           0x80000000
+#define DRM_EXYNOS_IPP_EVENT           0x80000001
 
 struct drm_exynos_g2d_event {
        struct drm_event        base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
        __u32                   reserved;
 };
 
+struct drm_exynos_ipp_event {
+       struct drm_event        base;
+       __u64                   user_data;
+       __u32                   tv_sec;
+       __u32                   tv_usec;
+       __u32                   prop_id;
+       __u32                   reserved;
+       __u32                   buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
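For orientation, a hedged userspace sketch of driving the new IPP ioctls declared above (fd is an open exynos DRM device, GEM handles are assumed to exist already; the IOWR direction suggests the driver returns the allocated prop_id in prop.prop_id):

    #include <string.h>
    #include <sys/ioctl.h>

    static int ipp_rotate_90(int fd, __u32 src_handle, __u32 dst_handle)
    {
            struct drm_exynos_ipp_property prop;
            struct drm_exynos_ipp_queue_buf qbuf;
            struct drm_exynos_ipp_cmd_ctrl ctrl;

            memset(&prop, 0, sizeof(prop));
            prop.cmd = IPP_CMD_M2M;                 /* memory-to-memory */
            prop.config[EXYNOS_DRM_OPS_SRC].ops_id = EXYNOS_DRM_OPS_SRC;
            prop.config[EXYNOS_DRM_OPS_DST].ops_id = EXYNOS_DRM_OPS_DST;
            prop.config[EXYNOS_DRM_OPS_DST].degree = EXYNOS_DRM_DEGREE_90;
            if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop) < 0)
                    return -1;

            memset(&qbuf, 0, sizeof(qbuf));
            qbuf.ops_id = EXYNOS_DRM_OPS_SRC;
            qbuf.buf_type = IPP_BUF_ENQUEUE;
            qbuf.prop_id = prop.prop_id;
            qbuf.handle[EXYNOS_DRM_PLANAR_Y] = src_handle;
            if (ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf) < 0)
                    return -1;
            /* ... enqueue dst_handle on EXYNOS_DRM_OPS_DST the same way ... */

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.prop_id = prop.prop_id;
            ctrl.ctrl = IPP_CTRL_PLAY;
            return ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);
    }

Completion is then signalled through DRM_EXYNOS_IPP_EVENT / struct drm_exynos_ipp_event defined above.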
index 4322b1e..b746a3c 100644 (file)
@@ -306,6 +306,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_SEMAPHORES       20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH         21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE  22
+#define I915_PARAM_HAS_SECURE_BATCHES   23
 
 typedef struct drm_i915_getparam {
        int param;
@@ -671,6 +672,11 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET       (1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note: only available to
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE               (1<<9)
+
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
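A hedged sketch of requesting a secure batch with the new flag (the rest of the execbuffer2 setup is elided; per the comment above, the caller must be root or DRM master or the submission is rejected):

    struct drm_i915_gem_execbuffer2 eb2;

    memset(&eb2, 0, sizeof(eb2));
    /* ... fill buffers_ptr, buffer_count, batch_len, etc. ... */
    eb2.flags |= I915_EXEC_SECURE;      /* privileged batch buffer */
    i915_execbuffer2_set_context_id(eb2, 0);
    ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);

Userspace can probe for support first with getparam on I915_PARAM_HAS_SECURE_BATCHES, added in the same hunk.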
index 4766c0f..eeda917 100644 (file)
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
 #define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace which CS is the last one */
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_PIPES          0x10
 /* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
 #define RADEON_INFO_TIMESTAMP          0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE             0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE      0x13
 
 struct drm_radeon_info {
        uint32_t                request;
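Querying the new limits follows the existing drm_radeon_info convention, where `value` carries a userspace pointer the kernel writes through; a minimal hedged sketch:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    struct drm_radeon_info info;
    uint32_t max_se = 0;

    memset(&info, 0, sizeof(info));
    info.request = RADEON_INFO_MAX_SE;      /* max shader engines */
    info.value = (uintptr_t)&max_se;        /* kernel writes the result here */
    ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);

RADEON_INFO_MAX_SH_PER_SE is queried the same way.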
index 23e62e0..0d11c3d 100644 (file)
@@ -20,6 +20,7 @@ enum {
        MPOL_PREFERRED,
        MPOL_BIND,
        MPOL_INTERLEAVE,
+       MPOL_LOCAL,
        MPOL_MAX,       /* always last member of enum */
 };
 
@@ -47,9 +48,15 @@ enum mpol_rebind_step {
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT (1<<0)  /* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE   (1<<1)  /* Move pages owned by this process to conform to mapping */
-#define MPOL_MF_MOVE_ALL (1<<2)        /* Move every page to conform to mapping */
-#define MPOL_MF_INTERNAL (1<<3)        /* Internal flags start here */
+#define MPOL_MF_MOVE    (1<<1) /* Move pages owned by this process to conform
+                                  to policy */
+#define MPOL_MF_MOVE_ALL (1<<2)        /* Move every page to conform to policy */
+#define MPOL_MF_LAZY    (1<<3) /* Modifies '_MOVE': lazy migrate on fault */
+#define MPOL_MF_INTERNAL (1<<4)        /* Internal flags start here */
+
+#define MPOL_MF_VALID  (MPOL_MF_STRICT   |     \
+                        MPOL_MF_MOVE     |     \
+                        MPOL_MF_MOVE_ALL)
 
 /*
  * Internal flags that share the struct mempolicy flags word with
@@ -59,6 +66,8 @@ enum mpol_rebind_step {
 #define MPOL_F_SHARED  (1 << 0)        /* identify shared policies */
 #define MPOL_F_LOCAL   (1 << 1)        /* preferred local allocation */
 #define MPOL_F_REBINDING (1 << 2)      /* identify policies in rebinding */
+#define MPOL_F_MOF     (1 << 3) /* this policy wants migrate on fault */
+#define MPOL_F_MORON   (1 << 4) /* Migrate On pte_numa Reference On Node */
 
 
 #endif /* _UAPI_LINUX_MEMPOLICY_H */
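For context on the flag split above, a hedged userspace sketch of an mbind() call whose flags fall inside MPOL_MF_VALID (note MPOL_MF_LAZY is new and not included in that mask here):

    #include <numaif.h>                 /* mbind(); link with -lnuma */
    #include <stdio.h>

    /* Bind [addr, addr+len) to node 0 and migrate existing pages there. */
    unsigned long nodemask = 1UL << 0;  /* node 0 */
    if (mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
              MPOL_MF_STRICT | MPOL_MF_MOVE) != 0)
            perror("mbind");

MPOL_F_MOF and MPOL_F_MORON, by contrast, are kernel-internal bits sharing the mempolicy flags word and are never passed in from userspace.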
index 2054e04..1a207ef 100644 (file)
@@ -717,6 +717,50 @@ config LOG_BUF_SHIFT
 config HAVE_UNSTABLE_SCHED_CLOCK
        bool
 
+#
+# For architectures that want to enable the support for NUMA-affine scheduler
+# balancing logic:
+#
+config ARCH_SUPPORTS_NUMA_BALANCING
+       bool
+
+# For architectures that (ab)use NUMA to represent different memory regions
+# all cpu-local but of different latencies, such as SuperH.
+#
+config ARCH_WANT_NUMA_VARIABLE_LOCALITY
+       bool
+
+#
+# For architectures that are willing to define _PAGE_NUMA as _PAGE_PROTNONE
+config ARCH_WANTS_PROT_NUMA_PROT_NONE
+       bool
+
+config ARCH_USES_NUMA_PROT_NONE
+       bool
+       default y
+       depends on ARCH_WANTS_PROT_NUMA_PROT_NONE
+       depends on NUMA_BALANCING
+
+config NUMA_BALANCING_DEFAULT_ENABLED
+       bool "Automatically enable NUMA aware memory/task placement"
+       default y
+       depends on NUMA_BALANCING
+       help
+         If set, automatic NUMA balancing will be enabled if running on a NUMA
+         machine.
+
+config NUMA_BALANCING
+       bool "Memory placement aware NUMA scheduler"
+       depends on ARCH_SUPPORTS_NUMA_BALANCING
+       depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+       depends on SMP && NUMA && MIGRATION
+       help
+         This option adds support for automatic NUMA aware memory/task placement.
+         The mechanism is quite primitive and is based on migrating memory to
+         the node the task is running on when the memory is referenced.
+
+         This system will be inactive on UMA systems.
+
 menuconfig CGROUPS
        boolean "Control Group support"
        depends on EVENTFD
index 6af5470..63ae904 100644 (file)
@@ -463,10 +463,6 @@ static void __init mm_init(void)
        percpu_init_late();
        pgtable_cache_init();
        vmalloc_init();
-#ifdef CONFIG_X86
-       if (efi_enabled)
-               efi_enter_virtual_mode();
-#endif
 }
 
 asmlinkage void __init start_kernel(void)
@@ -607,6 +603,10 @@ asmlinkage void __init start_kernel(void)
        calibrate_delay();
        pidmap_init();
        anon_vma_init();
+#ifdef CONFIG_X86
+       if (efi_enabled)
+               efi_enter_virtual_mode();
+#endif
        thread_info_cache_init();
        cred_init();
        fork_init(totalram_pages);
index 48cea3d..8888afb 100644 (file)
 static struct kmem_cache *cred_jar;
 
 /*
- * The common credentials for the initial task's thread group
- */
-#ifdef CONFIG_KEYS
-static struct thread_group_cred init_tgcred = {
-       .usage  = ATOMIC_INIT(2),
-       .tgid   = 0,
-       .lock   = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
-};
-#endif
-
-/*
  * The initial credentials for the initial task
  */
 struct cred init_cred = {
@@ -65,9 +54,6 @@ struct cred init_cred = {
        .user                   = INIT_USER,
        .user_ns                = &init_user_ns,
        .group_info             = &init_groups,
-#ifdef CONFIG_KEYS
-       .tgcred                 = &init_tgcred,
-#endif
 };
 
 static inline void set_cred_subscribers(struct cred *cred, int n)
@@ -96,36 +82,6 @@ static inline void alter_cred_subscribers(const struct cred *_cred, int n)
 }
 
 /*
- * Dispose of the shared task group credentials
- */
-#ifdef CONFIG_KEYS
-static void release_tgcred_rcu(struct rcu_head *rcu)
-{
-       struct thread_group_cred *tgcred =
-               container_of(rcu, struct thread_group_cred, rcu);
-
-       BUG_ON(atomic_read(&tgcred->usage) != 0);
-
-       key_put(tgcred->session_keyring);
-       key_put(tgcred->process_keyring);
-       kfree(tgcred);
-}
-#endif
-
-/*
- * Release a set of thread group credentials.
- */
-static void release_tgcred(struct cred *cred)
-{
-#ifdef CONFIG_KEYS
-       struct thread_group_cred *tgcred = cred->tgcred;
-
-       if (atomic_dec_and_test(&tgcred->usage))
-               call_rcu(&tgcred->rcu, release_tgcred_rcu);
-#endif
-}
-
-/*
  * The RCU callback to actually dispose of a set of credentials
  */
 static void put_cred_rcu(struct rcu_head *rcu)
@@ -150,9 +106,10 @@ static void put_cred_rcu(struct rcu_head *rcu)
 #endif
 
        security_cred_free(cred);
+       key_put(cred->session_keyring);
+       key_put(cred->process_keyring);
        key_put(cred->thread_keyring);
        key_put(cred->request_key_auth);
-       release_tgcred(cred);
        if (cred->group_info)
                put_group_info(cred->group_info);
        free_uid(cred->user);
@@ -246,15 +203,6 @@ struct cred *cred_alloc_blank(void)
        if (!new)
                return NULL;
 
-#ifdef CONFIG_KEYS
-       new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
-       if (!new->tgcred) {
-               kmem_cache_free(cred_jar, new);
-               return NULL;
-       }
-       atomic_set(&new->tgcred->usage, 1);
-#endif
-
        atomic_set(&new->usage, 1);
 #ifdef CONFIG_DEBUG_CREDENTIALS
        new->magic = CRED_MAGIC;
@@ -308,9 +256,10 @@ struct cred *prepare_creds(void)
        get_user_ns(new->user_ns);
 
 #ifdef CONFIG_KEYS
+       key_get(new->session_keyring);
+       key_get(new->process_keyring);
        key_get(new->thread_keyring);
        key_get(new->request_key_auth);
-       atomic_inc(&new->tgcred->usage);
 #endif
 
 #ifdef CONFIG_SECURITY
@@ -334,39 +283,20 @@ EXPORT_SYMBOL(prepare_creds);
  */
 struct cred *prepare_exec_creds(void)
 {
-       struct thread_group_cred *tgcred = NULL;
        struct cred *new;
 
-#ifdef CONFIG_KEYS
-       tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
-       if (!tgcred)
-               return NULL;
-#endif
-
        new = prepare_creds();
-       if (!new) {
-               kfree(tgcred);
+       if (!new)
                return new;
-       }
 
 #ifdef CONFIG_KEYS
        /* newly exec'd tasks don't get a thread keyring */
        key_put(new->thread_keyring);
        new->thread_keyring = NULL;
 
-       /* create a new per-thread-group creds for all this set of threads to
-        * share */
-       memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred));
-
-       atomic_set(&tgcred->usage, 1);
-       spin_lock_init(&tgcred->lock);
-
        /* inherit the session keyring; new process keyring */
-       key_get(tgcred->session_keyring);
-       tgcred->process_keyring = NULL;
-
-       release_tgcred(new);
-       new->tgcred = tgcred;
+       key_put(new->process_keyring);
+       new->process_keyring = NULL;
 #endif
 
        return new;
@@ -383,9 +313,6 @@ struct cred *prepare_exec_creds(void)
  */
 int copy_creds(struct task_struct *p, unsigned long clone_flags)
 {
-#ifdef CONFIG_KEYS
-       struct thread_group_cred *tgcred;
-#endif
        struct cred *new;
        int ret;
 
@@ -425,22 +352,12 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
                        install_thread_keyring_to_cred(new);
        }
 
-       /* we share the process and session keyrings between all the threads in
-        * a process - this is slightly icky as we violate COW credentials a
-        * bit */
+       /* The process keyring is only shared between the threads in a process;
+        * anything outside of those threads doesn't inherit.
+        */
        if (!(clone_flags & CLONE_THREAD)) {
-               tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
-               if (!tgcred) {
-                       ret = -ENOMEM;
-                       goto error_put;
-               }
-               atomic_set(&tgcred->usage, 1);
-               spin_lock_init(&tgcred->lock);
-               tgcred->process_keyring = NULL;
-               tgcred->session_keyring = key_get(new->tgcred->session_keyring);
-
-               release_tgcred(new);
-               new->tgcred = tgcred;
+               key_put(new->process_keyring);
+               new->process_keyring = NULL;
        }
 #endif
 
@@ -643,9 +560,6 @@ void __init cred_init(void)
  */
 struct cred *prepare_kernel_cred(struct task_struct *daemon)
 {
-#ifdef CONFIG_KEYS
-       struct thread_group_cred *tgcred;
-#endif
        const struct cred *old;
        struct cred *new;
 
@@ -653,14 +567,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        if (!new)
                return NULL;
 
-#ifdef CONFIG_KEYS
-       tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
-       if (!tgcred) {
-               kmem_cache_free(cred_jar, new);
-               return NULL;
-       }
-#endif
-
        kdebug("prepare_kernel_cred() alloc %p", new);
 
        if (daemon)
@@ -678,13 +584,10 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        get_group_info(new->group_info);
 
 #ifdef CONFIG_KEYS
-       atomic_set(&tgcred->usage, 1);
-       spin_lock_init(&tgcred->lock);
-       tgcred->process_keyring = NULL;
-       tgcred->session_keyring = NULL;
-       new->tgcred = tgcred;
-       new->request_key_auth = NULL;
+       new->session_keyring = NULL;
+       new->process_keyring = NULL;
        new->thread_keyring = NULL;
+       new->request_key_auth = NULL;
        new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
 #endif
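From a caller's point of view nothing changes: the keyrings simply live in struct cred now instead of a shared tgcred. A hedged sketch of the usual kernel-service pattern around prepare_kernel_cred() (override_creds()/revert_creds() are the existing cred API):

    const struct cred *old;
    struct cred *kcred;

    kcred = prepare_kernel_cred(NULL);  /* root creds; keyrings start NULL */
    if (!kcred)
            return -ENOMEM;
    old = override_creds(kcred);        /* act with the kernel credentials */
    /* ... privileged work, e.g. filesystem access ... */
    revert_creds(old);                  /* restore the caller's creds */
    put_cred(kcred);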
 
index 3c31e87..115d6c2 100644 (file)
@@ -823,6 +823,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
        mm->pmd_huge_pte = NULL;
 #endif
+#ifdef CONFIG_NUMA_BALANCING
+       mm->first_nid = NUMA_PTE_SCAN_INIT;
+#endif
        if (!mm_init(mm, tsk))
                goto fail_nomem;
 
index 0533496..c1fb821 100644 (file)
@@ -193,23 +193,10 @@ static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
 #endif /* HAVE_JUMP_LABEL */
 
-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
-               size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
 {
-       char buf[64];
-       char *cmp;
-       int neg = 0;
        int i;
-
-       if (cnt > 63)
-               cnt = 63;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-       cmp = strstrip(buf);
+       int neg = 0;
 
        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
@@ -229,6 +216,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                }
        }
 
+       return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       char *cmp;
+       int i;
+
+       if (cnt > 63)
+               cnt = 63;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+       cmp = strstrip(buf);
+
+       i = sched_feat_set(cmp);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
@@ -1560,7 +1568,40 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
+
+#ifdef CONFIG_NUMA_BALANCING
+       if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
+               p->mm->numa_next_scan = jiffies;
+               p->mm->numa_next_reset = jiffies;
+               p->mm->numa_scan_seq = 0;
+       }
+
+       p->node_stamp = 0ULL;
+       p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
+       p->numa_migrate_seq = p->mm ? p->mm->numa_scan_seq - 1 : 0;
+       p->numa_scan_period = sysctl_numa_balancing_scan_delay;
+       p->numa_work.next = &p->numa_work;
+#endif /* CONFIG_NUMA_BALANCING */
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+#ifdef CONFIG_SCHED_DEBUG
+void set_numabalancing_state(bool enabled)
+{
+       if (enabled)
+               sched_feat_set("NUMA");
+       else
+               sched_feat_set("NO_NUMA");
+}
+#else
+__read_mostly bool numabalancing_enabled;
+
+void set_numabalancing_state(bool enabled)
+{
+       numabalancing_enabled = enabled;
 }
+#endif /* CONFIG_SCHED_DEBUG */
+#endif /* CONFIG_NUMA_BALANCING */
 
 /*
  * fork()/clone()-time setup:
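set_numabalancing_state() is the single switch other code flips; a hedged sketch of wiring a numa_balancing= boot parameter to it (the real parameter handling lands elsewhere in this series; the setup function here is illustrative):

    static int __init setup_numabalancing(char *str)
    {
            /* Assumed wiring: flip the NUMA scheduler feature bit, or the
             * plain boolean when SCHED_DEBUG is off, via the API above. */
            if (!strcmp(str, "enable"))
                    set_numabalancing_state(true);
            else if (!strcmp(str, "disable"))
                    set_numabalancing_state(false);
            return 1;
    }
    __setup("numa_balancing=", setup_numabalancing);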
index 756f9f9..4603d6c 100644 (file)
@@ -26,6 +26,9 @@
 #include <linux/slab.h>
 #include <linux/profile.h>
 #include <linux/interrupt.h>
+#include <linux/mempolicy.h>
+#include <linux/migrate.h>
+#include <linux/task_work.h>
 
 #include <trace/events/sched.h>
 
@@ -774,6 +777,227 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * numa task sample period in ms
+ */
+unsigned int sysctl_numa_balancing_scan_period_min = 100;
+unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
+unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
+
+/* Portion of address space to scan in MB */
+unsigned int sysctl_numa_balancing_scan_size = 256;
+
+/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
+unsigned int sysctl_numa_balancing_scan_delay = 1000;
+
+static void task_numa_placement(struct task_struct *p)
+{
+       int seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+
+       if (p->numa_scan_seq == seq)
+               return;
+       p->numa_scan_seq = seq;
+
+       /* FIXME: Scheduling placement policy hints go here */
+}
+
+/*
+ * Got a PROT_NONE fault for a page on @node.
+ */
+void task_numa_fault(int node, int pages, bool migrated)
+{
+       struct task_struct *p = current;
+
+       if (!sched_feat_numa(NUMA))
+               return;
+
+       /* FIXME: Allocate task-specific structure for placement policy here */
+
+       /*
+        * If pages are properly placed (did not migrate) then scan slower.
+        * This is reset periodically in case of phase changes.
+        */
+       if (!migrated)
+               p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
+                       p->numa_scan_period + jiffies_to_msecs(10));
+
+       task_numa_placement(p);
+}
+
+static void reset_ptenuma_scan(struct task_struct *p)
+{
+       ACCESS_ONCE(p->mm->numa_scan_seq)++;
+       p->mm->numa_scan_offset = 0;
+}
+
+/*
+ * The expensive part of numa migration is done from task_work context.
+ * Triggered from task_tick_numa().
+ */
+void task_numa_work(struct callback_head *work)
+{
+       unsigned long migrate, next_scan, now = jiffies;
+       struct task_struct *p = current;
+       struct mm_struct *mm = p->mm;
+       struct vm_area_struct *vma;
+       unsigned long start, end;
+       long pages;
+
+       WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
+
+       work->next = work; /* protect against double add */
+       /*
+        * Who cares about NUMA placement when they're dying.
+        *
+        * NOTE: make sure not to dereference p->mm before this check,
+        * exit_task_work() happens _after_ exit_mm() so we could be called
+        * without p->mm even though we still had it when we enqueued this
+        * work.
+        */
+       if (p->flags & PF_EXITING)
+               return;
+
+       /*
+        * We do not care about task placement until a task runs on a node
+        * other than the first one used by the address space. This is
+        * largely because migrations are driven by what CPU the task
+        * is running on. If it's never scheduled on another node, it'll
+        * not migrate so why bother trapping the fault.
+        */
+       if (mm->first_nid == NUMA_PTE_SCAN_INIT)
+               mm->first_nid = numa_node_id();
+       if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
+               /* Are we running on a new node yet? */
+               if (numa_node_id() == mm->first_nid &&
+                   !sched_feat_numa(NUMA_FORCE))
+                       return;
+
+               mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
+       }
+
+       /*
+        * Reset the scan period if enough time has gone by. Objective is that
+        * scanning will be reduced if pages are properly placed. As tasks
+        * can enter different phases this needs to be re-examined. Lacking
+        * proper tracking of reference behaviour, this blunt hammer is used.
+        */
+       migrate = mm->numa_next_reset;
+       if (time_after(now, migrate)) {
+               p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+               next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
+               xchg(&mm->numa_next_reset, next_scan);
+       }
+
+       /*
+        * Enforce the maximal scan/migration frequency.
+        */
+       migrate = mm->numa_next_scan;
+       if (time_before(now, migrate))
+               return;
+
+       if (p->numa_scan_period == 0)
+               p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+
+       next_scan = now + msecs_to_jiffies(p->numa_scan_period);
+       if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
+               return;
+
+       /*
+        * Do not set pte_numa if the current running node is rate-limited.
+        * This loses statistics on the fault, but if we are unwilling to
+        * migrate to this node, it is less likely we can do useful work.
+        */
+       if (migrate_ratelimited(numa_node_id()))
+               return;
+
+       start = mm->numa_scan_offset;
+       pages = sysctl_numa_balancing_scan_size;
+       pages <<= 20 - PAGE_SHIFT; /* MB in pages */
+       if (!pages)
+               return;
+
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, start);
+       if (!vma) {
+               reset_ptenuma_scan(p);
+               start = 0;
+               vma = mm->mmap;
+       }
+       for (; vma; vma = vma->vm_next) {
+               if (!vma_migratable(vma))
+                       continue;
+
+               /* Skip small VMAs. They are not likely to be of relevance */
+               if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+                       continue;
+
+               do {
+                       start = max(start, vma->vm_start);
+                       end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
+                       end = min(end, vma->vm_end);
+                       pages -= change_prot_numa(vma, start, end);
+
+                       start = end;
+                       if (pages <= 0)
+                               goto out;
+               } while (end != vma->vm_end);
+       }
+
+out:
+       /*
+        * It is possible to reach the end of the VMA list but the last few
+        * VMAs are not guaranteed to be migratable. If they are not, we would
+        * find the !migratable VMA on the next scan but not reset the scanner
+        * to the start, so check it now.
+        */
+       if (vma)
+               mm->numa_scan_offset = start;
+       else
+               reset_ptenuma_scan(p);
+       up_read(&mm->mmap_sem);
+}
+
+/*
+ * Drive the periodic memory faults.
+ */
+void task_tick_numa(struct rq *rq, struct task_struct *curr)
+{
+       struct callback_head *work = &curr->numa_work;
+       u64 period, now;
+
+       /*
+        * We don't care about NUMA placement if we don't have memory.
+        */
+       if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+               return;
+
+       /*
+        * Using runtime rather than walltime has the dual advantage that
+        * we (mostly) drive the selection from busy threads and that the
+        * task needs to have done some actual work before we bother with
+        * NUMA placement.
+        */
+       now = curr->se.sum_exec_runtime;
+       period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
+
+       if (now - curr->node_stamp > period) {
+               if (!curr->node_stamp)
+                       curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+               curr->node_stamp = now;
+
+               if (!time_before(jiffies, curr->mm->numa_next_scan)) {
+                       init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
+                       task_work_add(curr, work, true);
+               }
+       }
+}
+#else
+static void task_tick_numa(struct rq *rq, struct task_struct *curr)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -5501,6 +5725,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
                entity_tick(cfs_rq, se, queued);
        }
 
+       if (sched_feat_numa(NUMA))
+               task_tick_numa(rq, curr);
+
        update_rq_runnable_avg(rq, 1);
 }
 
index e68e69a..1ad1d2b 100644 (file)
@@ -66,3 +66,14 @@ SCHED_FEAT(TTWU_QUEUE, true)
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
+
+/*
+ * Apply the automatic NUMA scheduling policy. Enabled automatically
+ * at runtime if running on a NUMA machine. Can be controlled via
+ * numa_balancing=. Allow PTE scanning to be forced on UMA machines
+ * for debugging the core machinery.
+ */
+#ifdef CONFIG_NUMA_BALANCING
+SCHED_FEAT(NUMA,       false)
+SCHED_FEAT(NUMA_FORCE, false)
+#endif
index 5eca173..fc88644 100644 (file)
@@ -663,6 +663,18 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
+#ifdef CONFIG_NUMA_BALANCING
+#define sched_feat_numa(x) sched_feat(x)
+#ifdef CONFIG_SCHED_DEBUG
+#define numabalancing_enabled sched_feat_numa(NUMA)
+#else
+extern bool numabalancing_enabled;
+#endif /* CONFIG_SCHED_DEBUG */
+#else
+#define sched_feat_numa(x) (0)
+#define numabalancing_enabled (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline u64 global_rt_period(void)
 {
        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
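A hedged sketch of how callers are meant to use these wrappers; the pattern mirrors the task_tick_fair() hunk earlier in this series (the function name here is illustrative, not a real kernel symbol):

    /* Illustrative only: NUMA work is gated on sched_feat_numa(), which
     * compiles to 0 when CONFIG_NUMA_BALANCING is off. */
    static void maybe_tick_numa(struct rq *rq, struct task_struct *curr)
    {
            if (!sched_feat_numa(NUMA))
                    return;
            task_tick_numa(rq, curr);
    }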
index ee376be..5af44b5 100644 (file)
@@ -396,25 +396,29 @@ int __secure_computing(int this_syscall)
 #ifdef CONFIG_SECCOMP_FILTER
        case SECCOMP_MODE_FILTER: {
                int data;
+               struct pt_regs *regs = task_pt_regs(current);
                ret = seccomp_run_filters(this_syscall);
                data = ret & SECCOMP_RET_DATA;
                ret &= SECCOMP_RET_ACTION;
                switch (ret) {
                case SECCOMP_RET_ERRNO:
                        /* Set the low-order 16 bits as an errno. */
-                       syscall_set_return_value(current, task_pt_regs(current),
+                       syscall_set_return_value(current, regs,
                                                 -data, 0);
                        goto skip;
                case SECCOMP_RET_TRAP:
                        /* Show the handler the original registers. */
-                       syscall_rollback(current, task_pt_regs(current));
+                       syscall_rollback(current, regs);
                        /* Let the filter pass back 16 bits of data. */
                        seccomp_send_sigsys(this_syscall, data);
                        goto skip;
                case SECCOMP_RET_TRACE:
                        /* Skip these calls if there is no tracer. */
-                       if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP))
+                       if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
+                               syscall_set_return_value(current, regs,
+                                                        -ENOSYS, 0);
                                goto skip;
+                       }
                        /* Allow the BPF to provide the event message */
                        ptrace_event(PTRACE_EVENT_SECCOMP, data);
                        /*
@@ -425,6 +429,9 @@ int __secure_computing(int this_syscall)
                         */
                        if (fatal_signal_pending(current))
                                break;
+                       if (syscall_get_nr(current, regs) < 0)
+                               goto skip;  /* Explicit request to skip. */
+
                        return 0;
                case SECCOMP_RET_ALLOW:
                        return 0;
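For reference, a minimal userspace filter that exercises the SECCOMP_RET_ERRNO path handled above (classic-BPF install via prctl(), as in the existing seccomp ABI; the choice of __NR_uname is illustrative):

    #include <errno.h>
    #include <stddef.h>
    #include <linux/filter.h>
    #include <linux/seccomp.h>
    #include <sys/prctl.h>
    #include <sys/syscall.h>

    struct sock_filter filter[] = {
            /* load the syscall number */
            BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                     offsetof(struct seccomp_data, nr)),
            /* fail uname() with ENOSYS; low 16 bits become the errno data */
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_uname, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
            BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
    };
    struct sock_fprog prog = {
            .len = sizeof(filter) / sizeof(filter[0]),
            .filter = filter,
    };

    prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);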
index 33f71f3..c88878d 100644 (file)
@@ -256,9 +256,11 @@ static int min_sched_granularity_ns = 100000;              /* 100 usecs */
 static int max_sched_granularity_ns = NSEC_PER_SEC;    /* 1 second */
 static int min_wakeup_granularity_ns;                  /* 0 usecs */
 static int max_wakeup_granularity_ns = NSEC_PER_SEC;   /* 1 second */
+#ifdef CONFIG_SMP
 static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
 static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
-#endif
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_SCHED_DEBUG */
 
 #ifdef CONFIG_COMPACTION
 static int min_extfrag_threshold;
@@ -301,6 +303,7 @@ static struct ctl_table kern_table[] = {
                .extra1         = &min_wakeup_granularity_ns,
                .extra2         = &max_wakeup_granularity_ns,
        },
+#ifdef CONFIG_SMP
        {
                .procname       = "sched_tunable_scaling",
                .data           = &sysctl_sched_tunable_scaling,
@@ -347,7 +350,45 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
-#endif
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_NUMA_BALANCING
+       {
+               .procname       = "numa_balancing_scan_delay_ms",
+               .data           = &sysctl_numa_balancing_scan_delay,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "numa_balancing_scan_period_min_ms",
+               .data           = &sysctl_numa_balancing_scan_period_min,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "numa_balancing_scan_period_reset",
+               .data           = &sysctl_numa_balancing_scan_period_reset,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "numa_balancing_scan_period_max_ms",
+               .data           = &sysctl_numa_balancing_scan_period_max,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       {
+               .procname       = "numa_balancing_scan_size_mb",
+               .data           = &sysctl_numa_balancing_scan_size,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+#endif /* CONFIG_NUMA_BALANCING */
+#endif /* CONFIG_SCHED_DEBUG */
        {
                .procname       = "sched_rt_period_us",
                .data           = &sysctl_sched_rt_period,
index f114bf6..196b069 100644 (file)
@@ -57,7 +57,7 @@ int swiotlb_force;
  * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
-static char *io_tlb_start, *io_tlb_end;
+static phys_addr_t io_tlb_start, io_tlb_end;
 
 /*
  * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-static void *io_tlb_overflow_buffer;
+static phys_addr_t io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -125,27 +125,38 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 void swiotlb_print_info(void)
 {
        unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-       phys_addr_t pstart, pend;
+       unsigned char *vstart, *vend;
 
-       pstart = virt_to_phys(io_tlb_start);
-       pend = virt_to_phys(io_tlb_end);
+       vstart = phys_to_virt(io_tlb_start);
+       vend = phys_to_virt(io_tlb_end);
 
        printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
-              (unsigned long long)pstart, (unsigned long long)pend - 1,
-              bytes >> 20, io_tlb_start, io_tlb_end - 1);
+              (unsigned long long)io_tlb_start,
+              (unsigned long long)io_tlb_end,
+              bytes >> 20, vstart, vend - 1);
 }
 
 void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
+       void *v_overflow_buffer;
        unsigned long i, bytes;
 
        bytes = nslabs << IO_TLB_SHIFT;
 
        io_tlb_nslabs = nslabs;
-       io_tlb_start = tlb;
+       io_tlb_start = __pa(tlb);
        io_tlb_end = io_tlb_start + bytes;
 
        /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
+       if (!v_overflow_buffer)
+               panic("Cannot allocate SWIOTLB overflow buffer!\n");
+
+       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+       /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
@@ -156,12 +167,6 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
-       /*
-        * Get the overflow emergency buffer
-        */
-       io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
-       if (!io_tlb_overflow_buffer)
-               panic("Cannot allocate SWIOTLB overflow buffer!\n");
        if (verbose)
                swiotlb_print_info();
 }
@@ -173,6 +178,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 static void __init
 swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
+       unsigned char *vstart;
        unsigned long bytes;
 
        if (!io_tlb_nslabs) {
@@ -185,11 +191,11 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
        /*
         * Get IO TLB memory from the low pages
         */
-       io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
-       if (!io_tlb_start)
+       vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
+       if (!vstart)
                panic("Cannot allocate SWIOTLB buffer");
 
-       swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
+       swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
 }
 
 void __init
@@ -207,6 +213,7 @@ int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
        unsigned long bytes, req_nslabs = io_tlb_nslabs;
+       unsigned char *vstart = NULL;
        unsigned int order;
        int rc = 0;
 
@@ -223,14 +230,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-                                                       order);
-               if (io_tlb_start)
+               vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                 order);
+               if (vstart)
                        break;
                order--;
        }
 
-       if (!io_tlb_start) {
+       if (!vstart) {
                io_tlb_nslabs = req_nslabs;
                return -ENOMEM;
        }
@@ -239,9 +246,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
        }
-       rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
+       rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
        if (rc)
-               free_pages((unsigned long)io_tlb_start, order);
+               free_pages((unsigned long)vstart, order);
        return rc;
 }
 
@@ -249,14 +256,25 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
        unsigned long i, bytes;
+       unsigned char *v_overflow_buffer;
 
        bytes = nslabs << IO_TLB_SHIFT;
 
        io_tlb_nslabs = nslabs;
-       io_tlb_start = tlb;
+       io_tlb_start = virt_to_phys(tlb);
        io_tlb_end = io_tlb_start + bytes;
 
-       memset(io_tlb_start, 0, bytes);
+       memset(tlb, 0, bytes);
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                    get_order(io_tlb_overflow));
+       if (!v_overflow_buffer)
+               goto cleanup2;
+
+       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
 
        /*
         * Allocate and initialize the free list array.  This array is used
@@ -266,7 +284,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
-               goto cleanup2;
+               goto cleanup3;
 
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
@@ -277,18 +295,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
-               goto cleanup3;
+               goto cleanup4;
 
        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
-       /*
-        * Get the overflow emergency buffer
-        */
-       io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                                 get_order(io_tlb_overflow));
-       if (!io_tlb_overflow_buffer)
-               goto cleanup4;
-
        swiotlb_print_info();
 
        late_alloc = 1;
@@ -296,42 +306,42 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        return 0;
 
 cleanup4:
-       free_pages((unsigned long)io_tlb_orig_addr,
-                  get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-       io_tlb_orig_addr = NULL;
-cleanup3:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
+cleanup3:
+       free_pages((unsigned long)v_overflow_buffer,
+                  get_order(io_tlb_overflow));
+       io_tlb_overflow_buffer = 0;
 cleanup2:
-       io_tlb_end = NULL;
-       io_tlb_start = NULL;
+       io_tlb_end = 0;
+       io_tlb_start = 0;
        io_tlb_nslabs = 0;
        return -ENOMEM;
 }
 
 void __init swiotlb_free(void)
 {
-       if (!io_tlb_overflow_buffer)
+       if (!io_tlb_orig_addr)
                return;
 
        if (late_alloc) {
-               free_pages((unsigned long)io_tlb_overflow_buffer,
+               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
                           get_order(io_tlb_overflow));
                free_pages((unsigned long)io_tlb_orig_addr,
                           get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                                 sizeof(int)));
-               free_pages((unsigned long)io_tlb_start,
+               free_pages((unsigned long)phys_to_virt(io_tlb_start),
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
        } else {
-               free_bootmem_late(__pa(io_tlb_overflow_buffer),
+               free_bootmem_late(io_tlb_overflow_buffer,
                                  PAGE_ALIGN(io_tlb_overflow));
                free_bootmem_late(__pa(io_tlb_orig_addr),
                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_bootmem_late(__pa(io_tlb_list),
                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-               free_bootmem_late(__pa(io_tlb_start),
+               free_bootmem_late(io_tlb_start,
                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
        io_tlb_nslabs = 0;
@@ -339,21 +349,21 @@ void __init swiotlb_free(void)
 
 static int is_swiotlb_buffer(phys_addr_t paddr)
 {
-       return paddr >= virt_to_phys(io_tlb_start) &&
-               paddr < virt_to_phys(io_tlb_end);
+       return paddr >= io_tlb_start && paddr < io_tlb_end;
 }
 
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-                   enum dma_data_direction dir)
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+                          size_t size, enum dma_data_direction dir)
 {
-       unsigned long pfn = PFN_DOWN(phys);
+       unsigned long pfn = PFN_DOWN(orig_addr);
+       unsigned char *vaddr = phys_to_virt(tlb_addr);
 
        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
-               unsigned int offset = phys & ~PAGE_MASK;
+               unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;
@@ -364,32 +374,31 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
-                               memcpy(dma_addr, buffer + offset, sz);
+                               memcpy(vaddr, buffer + offset, sz);
                        else
-                               memcpy(buffer + offset, dma_addr, sz);
+                               memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);
 
                        size -= sz;
                        pfn++;
-                       dma_addr += sz;
+                       vaddr += sz;
                        offset = 0;
                }
+       } else if (dir == DMA_TO_DEVICE) {
+               memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
-               if (dir == DMA_TO_DEVICE)
-                       memcpy(dma_addr, phys_to_virt(phys), size);
-               else
-                       memcpy(phys_to_virt(phys), dma_addr, size);
+               memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
 }
-EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
-                            phys_addr_t phys, size_t size,
-                            enum dma_data_direction dir)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                  dma_addr_t tbl_dma_addr,
+                                  phys_addr_t orig_addr, size_t size,
+                                  enum dma_data_direction dir)
 {
        unsigned long flags;
-       char *dma_addr;
+       phys_addr_t tlb_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long mask;
@@ -453,7 +462,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
-                       dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+                       tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
                        /*
                         * Update the indices to avoid searching in the next
@@ -471,7 +480,7 @@ void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
 
 not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
-       return NULL;
+       return SWIOTLB_MAP_ERROR;
 found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
 
@@ -481,11 +490,11 @@ found:
         * needed.
         */
        for (i = 0; i < nslots; i++)
-               io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
+               io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-               swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
-       return dma_addr;
+       return tlb_addr;
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
 
@@ -493,11 +502,10 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-          enum dma_data_direction dir)
+phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+                      enum dma_data_direction dir)
 {
-       dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+       dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
 }
@@ -505,20 +513,19 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
 /*
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
-void
-swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
-                       enum dma_data_direction dir)
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+                             size_t size, enum dma_data_direction dir)
 {
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t phys = io_tlb_orig_addr[index];
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
        /*
         * First, sync the memory before unmapping the entry
         */
-       if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+       if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
        /*
         * Return the buffer to the free list by setting the corresponding
@@ -547,26 +554,27 @@ swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-void
-swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
-                       enum dma_data_direction dir,
-                       enum dma_sync_target target)
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+                            size_t size, enum dma_data_direction dir,
+                            enum dma_sync_target target)
 {
-       int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t phys = io_tlb_orig_addr[index];
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
-       phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
+       orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
@@ -589,12 +597,15 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                dma_mask = hwdev->coherent_dma_mask;
 
        ret = (void *)__get_free_pages(flags, order);
-       if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
-               /*
-                * The allocated memory isn't reachable by the device.
-                */
-               free_pages((unsigned long) ret, order);
-               ret = NULL;
+       if (ret) {
+               dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+               if (dev_addr + size - 1 > dma_mask) {
+                       /*
+                        * The allocated memory isn't reachable by the device.
+                        */
+                       free_pages((unsigned long) ret, order);
+                       ret = NULL;
+               }
        }
        if (!ret) {
                /*
@@ -602,25 +613,29 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 * GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
-               ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
-               if (!ret)
+               phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
+               if (paddr == SWIOTLB_MAP_ERROR)
                        return NULL;
-       }
 
-       memset(ret, 0, size);
-       dev_addr = swiotlb_virt_to_bus(hwdev, ret);
+               ret = phys_to_virt(paddr);
+               dev_addr = phys_to_dma(hwdev, paddr);
 
-       /* Confirm address can be DMA'd by device */
-       if (dev_addr + size - 1 > dma_mask) {
-               printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-                      (unsigned long long)dma_mask,
-                      (unsigned long long)dev_addr);
+               /* Confirm address can be DMA'd by device */
+               if (dev_addr + size - 1 > dma_mask) {
+                       printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+                              (unsigned long long)dma_mask,
+                              (unsigned long long)dev_addr);
 
-               /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-               swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
-               return NULL;
+                       /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+                       swiotlb_tbl_unmap_single(hwdev, paddr,
+                                                size, DMA_TO_DEVICE);
+                       return NULL;
+               }
        }
+
        *dma_handle = dev_addr;
+       memset(ret, 0, size);
+
        return ret;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
@@ -636,7 +651,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                free_pages((unsigned long)vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
-               swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
@@ -677,9 +692,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
 {
-       phys_addr_t phys = page_to_phys(page) + offset;
+       phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);
-       void *map;
 
        BUG_ON(dir == DMA_NONE);
        /*
@@ -690,23 +704,19 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                return dev_addr;
 
-       /*
-        * Oh well, have to allocate and map a bounce buffer.
-        */
+       /* Oh well, have to allocate and map a bounce buffer. */
        map = map_single(dev, phys, size, dir);
-       if (!map) {
+       if (map == SWIOTLB_MAP_ERROR) {
                swiotlb_full(dev, size, dir, 1);
-               map = io_tlb_overflow_buffer;
+               return phys_to_dma(dev, io_tlb_overflow_buffer);
        }
 
-       dev_addr = swiotlb_virt_to_bus(dev, map);
+       dev_addr = phys_to_dma(dev, map);
 
-       /*
-        * Ensure that the address returned is DMA'ble
-        */
+       /* Ensure that the address returned is DMA'ble */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
-               dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
+               return phys_to_dma(dev, io_tlb_overflow_buffer);
        }
 
        return dev_addr;
@@ -729,7 +739,7 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }
 
@@ -773,8 +783,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
-                                      target);
+               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }
 
@@ -831,9 +840,9 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 
                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
-                       void *map = map_single(hwdev, sg_phys(sg),
-                                              sg->length, dir);
-                       if (!map) {
+                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
+                                                    sg->length, dir);
+                       if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
@@ -842,7 +851,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                sgl[0].dma_length = 0;
                                return 0;
                        }
-                       sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
+                       sg->dma_address = phys_to_dma(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
@@ -925,7 +934,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-       return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
+       return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
 }
 EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
@@ -938,6 +947,6 @@ EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-       return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
+       return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
 }
 EXPORT_SYMBOL(swiotlb_dma_supported);
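The swiotlb hunks above move the bounce-buffer plumbing from void * virtual addresses to phys_addr_t, so failure can no longer be signalled with a NULL pointer; callers compare against the SWIOTLB_MAP_ERROR sentinel instead. A hedged sketch of the resulting caller idiom, using only names from the hunks above (it mirrors swiotlb_map_page and is not a separate upstream function):

static dma_addr_t bounce_map(struct device *dev, phys_addr_t phys,
                             size_t size, enum dma_data_direction dir)
{
        /* map_single() now returns a physical address; 0 is a valid
         * paddr, so failure is the SWIOTLB_MAP_ERROR sentinel, not NULL */
        phys_addr_t map = map_single(dev, phys, size, dir);

        if (map == SWIOTLB_MAP_ERROR)           /* was: if (!map) */
                return phys_to_dma(dev, io_tlb_overflow_buffer);

        return phys_to_dma(dev, map);
}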
index d3ca2b3..bd6a6ca 100644 (file)
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/writeback.h>
 #include <linux/device.h>
+#include <linux/slab.h>
 #include <trace/events/writeback.h>
 
 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
@@ -221,12 +222,63 @@ static ssize_t max_ratio_store(struct device *dev,
 }
 BDI_SHOW(max_ratio, bdi->max_ratio)
 
+static ssize_t cpu_list_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       struct bdi_writeback *wb = &bdi->wb;
+       cpumask_var_t newmask;
+       ssize_t ret;
+       struct task_struct *task;
+
+       if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
+               return -ENOMEM;
+
+       ret = cpulist_parse(buf, newmask);
+       if (!ret) {
+               spin_lock_bh(&bdi->wb_lock);
+               task = wb->task;
+               if (task)
+                       get_task_struct(task);
+               spin_unlock_bh(&bdi->wb_lock);
+
+               mutex_lock(&bdi->flusher_cpumask_lock);
+               if (task) {
+                       ret = set_cpus_allowed_ptr(task, newmask);
+                       put_task_struct(task);
+               }
+               if (ret == 0) {
+                       cpumask_copy(bdi->flusher_cpumask, newmask);
+                       ret = count;
+               }
+               mutex_unlock(&bdi->flusher_cpumask_lock);
+
+       }
+       free_cpumask_var(newmask);
+
+       return ret;
+}
+
+static ssize_t cpu_list_show(struct device *dev,
+               struct device_attribute *attr, char *page)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       ssize_t ret;
+
+       mutex_lock(&bdi->flusher_cpumask_lock);
+       ret = cpulist_scnprintf(page, PAGE_SIZE-1, bdi->flusher_cpumask);
+       mutex_unlock(&bdi->flusher_cpumask_lock);
+
+       return ret;
+}
+
 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
 
 static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
+       __ATTR_RW(cpu_list),
        __ATTR_NULL,
 };
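The new per-bdi cpu_list attribute gives administrators a writable cpumask for a device's flusher thread; cpulist_parse() accepts the usual human-readable list syntax. A minimal userspace sketch (the bdi device name "8:0" and the CPU list are illustrative; bdi devices are exposed under /sys/class/bdi/ as major:minor):

#include <stdio.h>

int main(void)
{
        /* restrict sda's flusher (bdi 8:0) to CPUs 0-3 */
        FILE *f = fopen("/sys/class/bdi/8:0/cpu_list", "w");

        if (!f)
                return 1;
        fprintf(f, "0-3\n");
        return fclose(f) ? 1 : 0;
}

Reading the file back goes through cpu_list_show(), which prints the current mask with cpulist_scnprintf().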
 
@@ -428,6 +480,7 @@ static int bdi_forker_thread(void *ptr)
                                writeback_inodes_wb(&bdi->wb, 1024,
                                                    WB_REASON_FORKER_THREAD);
                        } else {
+                               int ret;
                                /*
                                 * The spinlock makes sure we do not lose
                                 * wake-ups when racing with 'bdi_queue_work()'.
@@ -437,6 +490,14 @@ static int bdi_forker_thread(void *ptr)
                                spin_lock_bh(&bdi->wb_lock);
                                bdi->wb.task = task;
                                spin_unlock_bh(&bdi->wb_lock);
+                               mutex_lock(&bdi->flusher_cpumask_lock);
+                               ret = set_cpus_allowed_ptr(task,
+                                                       bdi->flusher_cpumask);
+                               mutex_unlock(&bdi->flusher_cpumask_lock);
+                               if (ret)
+                                       printk_once(KERN_WARNING
+                                               "%s: failed to bind flusher thread %s, error %d\n",
+                                               __func__, task->comm, ret);
                                wake_up_process(task);
                        }
                        bdi_clear_pending(bdi);
@@ -509,6 +570,17 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                                                dev_name(dev));
                if (IS_ERR(wb->task))
                        return PTR_ERR(wb->task);
+       } else {
+               int node;
+               /*
+                * Set up a default cpumask for the flusher threads that
+                * includes all cpus on the same numa node as the device.
+                * The mask may be overridden via sysfs.
+                */
+               node = dev_to_node(bdi->dev);
+               if (node != NUMA_NO_NODE)
+                       cpumask_copy(bdi->flusher_cpumask,
+                                    cpumask_of_node(node));
        }
 
        bdi_debug_register(bdi, dev_name(dev));
@@ -634,6 +706,15 @@ int bdi_init(struct backing_dev_info *bdi)
 
        bdi_wb_init(&bdi->wb, bdi);
 
+       if (!bdi_cap_flush_forker(bdi)) {
+               bdi->flusher_cpumask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+               if (!bdi->flusher_cpumask)
+                       return -ENOMEM;
+               cpumask_setall(bdi->flusher_cpumask);
+               mutex_init(&bdi->flusher_cpumask_lock);
+       } else {
+               bdi->flusher_cpumask = NULL;
+       }
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
@@ -656,6 +737,7 @@ int bdi_init(struct backing_dev_info *bdi)
 err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
+               kfree(bdi->flusher_cpumask);
        }
 
        return err;
@@ -683,6 +765,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
 
        bdi_unregister(bdi);
 
+       kfree(bdi->flusher_cpumask);
+
        /*
         * If bdi_unregister() had already been called earlier, the
         * wakeup_timer could still be armed because bdi_prune_sb()
index 1297912..5ad7f4f 100644 (file)
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -303,6 +303,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
 
+       count_vm_events(COMPACTFREE_SCANNED, nr_scanned);
+       if (total_isolated)
+               count_vm_events(COMPACTISOLATED, total_isolated);
+
        return total_isolated;
 }
 
@@ -609,6 +613,10 @@ next_pageblock:
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
+       count_vm_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+       if (nr_isolated)
+               count_vm_events(COMPACTISOLATED, nr_isolated);
+
        return low_pfn;
 }
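These two hunks move compaction accounting from per-migration-batch counters to per-scan counters; with CONFIG_COMPACTION they surface in /proc/vmstat as compact_migrate_scanned, compact_free_scanned and compact_isolated (names assuming the usual lower-casing of the vm_event enum entries).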
 
@@ -1015,14 +1023,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
-                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+                               MR_COMPACTION);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
-               count_vm_event(COMPACTBLOCKS);
-               count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
-               if (nr_remaining)
-                       count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);
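migrate_pages() grows a reason argument here, and the ad hoc COMPACTBLOCKS/COMPACTPAGES counters give way to the mm_migrate_pages tracepoint plus the PGMIGRATE_SUCCESS/PGMIGRATE_FAIL events accounted in mm/migrate.c below. For orientation, a sketch of the reason values as used by the call sites in this series (the real enum lives in include/linux/migrate.h and may carry more entries):

enum migrate_reason {
        MR_COMPACTION,          /* compact_zone() above */
        MR_MEMORY_FAILURE,      /* soft_offline_page() */
        MR_MEMORY_HOTPLUG,      /* do_migrate_range() */
        MR_SYSCALL,             /* move_pages() and migrate_to_node() */
        MR_MEMPOLICY_MBIND,     /* do_mbind() */
        MR_NUMA_MISPLACED,      /* NUMA hinting fault migration */
};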
 
index 827d9c8..32754ee 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -19,6 +19,7 @@
 #include <linux/freezer.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
+#include <linux/migrate.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -690,7 +691,7 @@ out:
 }
 __setup("transparent_hugepage=", setup_transparent_hugepage);
 
-static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 {
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
@@ -848,7 +849,8 @@ out:
         * run pte_offset_map on the pmd, if an huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if an huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
@@ -1287,6 +1289,81 @@ out:
        return page;
 }
 
+/* NUMA hinting page fault entry point for trans huge pmds */
+int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+       struct page *page;
+       unsigned long haddr = addr & HPAGE_PMD_MASK;
+       int target_nid;
+       int current_nid = -1;
+       bool migrated = false;
+       bool page_locked = false;
+
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp)))
+               goto out_unlock;
+
+       page = pmd_page(pmd);
+       get_page(page);
+       current_nid = page_to_nid(page);
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+       target_nid = mpol_misplaced(page, vma, haddr);
+       if (target_nid == -1) {
+               put_page(page);
+               goto clear_pmdnuma;
+       }
+
+       /* Acquire the page lock to serialise THP migrations */
+       spin_unlock(&mm->page_table_lock);
+       lock_page(page);
+       page_locked = true;
+
+       /* Confirm the PMD did not change while page_table_lock was dropped */
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(pmd, *pmdp))) {
+               unlock_page(page);
+               put_page(page);
+               goto out_unlock;
+       }
+       spin_unlock(&mm->page_table_lock);
+
+       /* Migrate the THP to the requested node */
+       migrated = migrate_misplaced_transhuge_page(mm, vma,
+                               pmdp, pmd, addr,
+                               page, target_nid);
+       if (migrated)
+               current_nid = target_nid;
+       else {
+               spin_lock(&mm->page_table_lock);
+               if (unlikely(!pmd_same(pmd, *pmdp))) {
+                       unlock_page(page);
+                       goto out_unlock;
+               }
+               goto clear_pmdnuma;
+       }
+
+       task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+       return 0;
+
+clear_pmdnuma:
+       pmd = pmd_mknonnuma(pmd);
+       set_pmd_at(mm, haddr, pmdp, pmd);
+       VM_BUG_ON(pmd_numa(*pmdp));
+       update_mmu_cache_pmd(vma, addr, pmdp);
+       if (page_locked)
+               unlock_page(page);
+
+out_unlock:
+       spin_unlock(&mm->page_table_lock);
+       if (current_nid != -1)
+               task_numa_fault(current_nid, HPAGE_PMD_NR, migrated);
+       return 0;
+}
+
 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 pmd_t *pmd, unsigned long addr)
 {
@@ -1375,7 +1452,7 @@ out:
 }
 
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long addr, pgprot_t newprot)
+               unsigned long addr, pgprot_t newprot, int prot_numa)
 {
        struct mm_struct *mm = vma->vm_mm;
        int ret = 0;
@@ -1383,8 +1460,18 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                pmd_t entry;
                entry = pmdp_get_and_clear(mm, addr, pmd);
-               entry = pmd_modify(entry, newprot);
-               BUG_ON(pmd_write(entry));
+               if (!prot_numa) {
+                       entry = pmd_modify(entry, newprot);
+                       BUG_ON(pmd_write(entry));
+               } else {
+                       struct page *page = pmd_page(*pmd);
+
+                       /* only check non-shared pages */
+                       if (page_mapcount(page) == 1 &&
+                           !pmd_numa(*pmd)) {
+                               entry = pmd_mknuma(entry);
+                       }
+               }
                set_pmd_at(mm, addr, pmd, entry);
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
@@ -1474,7 +1561,7 @@ static int __split_huge_page_splitting(struct page *page,
                 * We can't temporarily set the pmd to null in order
                 * to split it, the pmd must remain marked huge at all
                 * times or the VM won't take the pmd_trans_huge paths
-                * and it won't wait on the anon_vma->root->mutex to
+                * and it won't wait on the anon_vma->root->rwsem to
                 * serialize against split_huge_page*.
                 */
                pmdp_splitting_flush(vma, address, pmd);
@@ -1565,6 +1652,7 @@ static void __split_huge_page_refcount(struct page *page)
                page_tail->mapping = page->mapping;
 
                page_tail->index = page->index + i;
+               page_xchg_last_nid(page_tail, page_last_nid(page));
 
                BUG_ON(!PageAnon(page_tail));
                BUG_ON(!PageUptodate(page_tail));
@@ -1632,6 +1720,8 @@ static int __split_huge_page_map(struct page *page,
                                BUG_ON(page_mapcount(page) != 1);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
+                       if (pmd_numa(*pmd))
+                               entry = pte_mknuma(entry);
                        pte = pte_offset_map(&_pmd, haddr);
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, haddr, pte, entry);
@@ -1674,7 +1764,7 @@ static int __split_huge_page_map(struct page *page,
        return ret;
 }
 
-/* must be called with anon_vma->root->mutex hold */
+/* must be called with anon_vma->root->rwsem held */
 static void __split_huge_page(struct page *page,
                              struct anon_vma *anon_vma)
 {
@@ -1729,7 +1819,7 @@ int split_huge_page(struct page *page)
 
        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
        BUG_ON(!PageAnon(page));
-       anon_vma = page_lock_anon_vma(page);
+       anon_vma = page_lock_anon_vma_read(page);
        if (!anon_vma)
                goto out;
        ret = 0;
@@ -1742,7 +1832,7 @@ int split_huge_page(struct page *page)
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       page_unlock_anon_vma(anon_vma);
+       page_unlock_anon_vma_read(anon_vma);
 out:
        return ret;
 }
@@ -2234,7 +2324,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        if (pmd_trans_huge(*pmd))
                goto out;
 
-       anon_vma_lock(vma->anon_vma);
+       anon_vma_lock_write(vma->anon_vma);
 
        pte = pte_offset_map(pmd, address);
        ptl = pte_lockptr(mm, pmd);
index 88e7293..e5318c7 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3016,7 +3016,7 @@ same_page:
        return i ? i : -EFAULT;
 }
 
-void hugetlb_change_protection(struct vm_area_struct *vma,
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
        struct mm_struct *mm = vma->vm_mm;
@@ -3024,6 +3024,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t *ptep;
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
+       unsigned long pages = 0;
 
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
@@ -3034,12 +3035,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
-               if (huge_pmd_unshare(mm, &address, ptep))
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       pages++;
                        continue;
+               }
                if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
+                       pages++;
                }
        }
        spin_unlock(&mm->page_table_lock);
@@ -3051,6 +3055,8 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
         */
        flush_tlb_range(vma, start, end);
        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+
+       return pages << h->order;
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
index 52d1fa9..d597f94 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -217,15 +217,18 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 {
        if (TestClearPageMlocked(page)) {
                unsigned long flags;
+               int nr_pages = hpage_nr_pages(page);
 
                local_irq_save(flags);
-               __dec_zone_page_state(page, NR_MLOCK);
+               __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                SetPageMlocked(newpage);
-               __inc_zone_page_state(newpage, NR_MLOCK);
+               __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
                local_irq_restore(flags);
        }
 }
 
+extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern unsigned long vma_address(struct page *page,
                                 struct vm_area_struct *vma);
index 382d930..82dfb4b 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1624,7 +1624,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               anon_vma_lock(anon_vma);
+               anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
                        vma = vmac->vma;
@@ -1678,7 +1678,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               anon_vma_lock(anon_vma);
+               anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
                        vma = vmac->vma;
@@ -1731,7 +1731,7 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
-               anon_vma_lock(anon_vma);
+               anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
                        vma = vmac->vma;
index 6c05592..bbfac50 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3289,15 +3289,18 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                                  struct mem_cgroup **memcgp)
 {
        struct mem_cgroup *memcg = NULL;
+       unsigned int nr_pages = 1;
        struct page_cgroup *pc;
        enum charge_type ctype;
 
        *memcgp = NULL;
 
-       VM_BUG_ON(PageTransHuge(page));
        if (mem_cgroup_disabled())
                return;
 
+       if (PageTransHuge(page))
+               nr_pages <<= compound_order(page);
+
        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
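With the VM_BUG_ON gone, a THP can now reach migration with memcg enabled: for a 2MB THP, compound_order(page) is HPAGE_PMD_ORDER (9), so nr_pages becomes 1 << 9 = 512 and the commit below charges the whole huge page to the new location rather than a single base page.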
@@ -3359,7 +3362,7 @@ void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
         * charged to the res_counter since we plan on replacing the
         * old one and only one page is going to be left afterwards.
         */
-       __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
+       __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
 }
 
 /* remove redundant charge if migration failed*/
index 108c52f..c6e4dd3 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -402,7 +402,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct anon_vma *av;
        pgoff_t pgoff;
 
-       av = page_lock_anon_vma(page);
+       av = page_lock_anon_vma_read(page);
        if (av == NULL) /* Not actually mapped anymore */
                return;
 
@@ -423,7 +423,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                }
        }
        read_unlock(&tasklist_lock);
-       page_unlock_anon_vma(av);
+       page_unlock_anon_vma_read(av);
 }
 
 /*
@@ -1566,7 +1566,8 @@ int soft_offline_page(struct page *page, int flags)
                                            page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
-                                                       false, MIGRATE_SYNC);
+                                                       false, MIGRATE_SYNC,
+                                                       MR_MEMORY_FAILURE);
                if (ret) {
                        putback_lru_pages(&pagelist);
                        pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
index db2e9e7..e6a3b93 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -57,6 +57,7 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 #include <linux/gfp.h>
+#include <linux/migrate.h>
 
 #include <asm/io.h>
 #include <asm/pgalloc.h>
@@ -1503,6 +1504,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
        }
+       if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+               goto no_page_table;
        if (pmd_trans_huge(*pmd)) {
                if (flags & FOLL_SPLIT) {
                        split_huge_page_pmd(vma, address, pmd);
@@ -1532,6 +1535,8 @@ split_fallthrough:
        pte = *ptep;
        if (!pte_present(pte))
                goto no_page;
+       if ((flags & FOLL_NUMA) && pte_numa(pte))
+               goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
 
@@ -1683,6 +1688,19 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (gup_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+       /*
+        * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+        * would be called on PROT_NONE ranges. We must never invoke
+        * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+        * page faults would unprotect the PROT_NONE ranges if
+        * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+        * bitflag. So to avoid that, don't set FOLL_NUMA if
+        * FOLL_FORCE is set.
+        */
+       if (!(gup_flags & FOLL_FORCE))
+               gup_flags |= FOLL_NUMA;
+
        i = 0;
 
        do {
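Note how this pairs with the follow_page() hunks above: once FOLL_NUMA is set, pte_numa/pmd_numa entries make follow_page() report no page, which forces __get_user_pages() through handle_mm_fault() and therefore through the NUMA hinting paths (do_numa_page() below, do_huge_pmd_numa_page() in the mm/huge_memory.c hunks above). FOLL_FORCE callers opt out precisely because servicing the hinting fault would clear the protection bit they may be relying on.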
@@ -3412,6 +3430,169 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
+int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+                               unsigned long addr, int current_nid)
+{
+       get_page(page);
+
+       count_vm_numa_event(NUMA_HINT_FAULTS);
+       if (current_nid == numa_node_id())
+               count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
+
+       return mpol_misplaced(page, vma, addr);
+}
+
+int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                  unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd)
+{
+       struct page *page = NULL;
+       spinlock_t *ptl;
+       int current_nid = -1;
+       int target_nid;
+       bool migrated = false;
+
+       /*
+        * The "pte" at this point cannot be used safely without
+        * validation through pte_unmap_same(). It's of NUMA type but
+        * the pfn may be screwed if the read is non-atomic.
+        *
+        * ptep_modify_prot_start is not called as this is clearing
+        * the _PAGE_NUMA bit and it is not really expected that there
+        * would be concurrent hardware modifications to the PTE.
+        */
+       ptl = pte_lockptr(mm, pmd);
+       spin_lock(ptl);
+       if (unlikely(!pte_same(*ptep, pte))) {
+               pte_unmap_unlock(ptep, ptl);
+               goto out;
+       }
+
+       pte = pte_mknonnuma(pte);
+       set_pte_at(mm, addr, ptep, pte);
+       update_mmu_cache(vma, addr, ptep);
+
+       page = vm_normal_page(vma, addr, pte);
+       if (!page) {
+               pte_unmap_unlock(ptep, ptl);
+               return 0;
+       }
+
+       current_nid = page_to_nid(page);
+       target_nid = numa_migrate_prep(page, vma, addr, current_nid);
+       pte_unmap_unlock(ptep, ptl);
+       if (target_nid == -1) {
+               /*
+                * Account for the fault against the current node if it is not
+                * being replaced, regardless of where the page is located.
+                */
+               current_nid = numa_node_id();
+               put_page(page);
+               goto out;
+       }
+
+       /* Migrate to the requested node */
+       migrated = migrate_misplaced_page(page, target_nid);
+       if (migrated)
+               current_nid = target_nid;
+
+out:
+       if (current_nid != -1)
+               task_numa_fault(current_nid, 1, migrated);
+       return 0;
+}
+
+/* NUMA hinting page fault entry point for regular pmds */
+#ifdef CONFIG_NUMA_BALANCING
+static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                    unsigned long addr, pmd_t *pmdp)
+{
+       pmd_t pmd;
+       pte_t *pte, *orig_pte;
+       unsigned long _addr = addr & PMD_MASK;
+       unsigned long offset;
+       spinlock_t *ptl;
+       bool numa = false;
+       int local_nid = numa_node_id();
+
+       spin_lock(&mm->page_table_lock);
+       pmd = *pmdp;
+       if (pmd_numa(pmd)) {
+               set_pmd_at(mm, _addr, pmdp, pmd_mknonnuma(pmd));
+               numa = true;
+       }
+       spin_unlock(&mm->page_table_lock);
+
+       if (!numa)
+               return 0;
+
+       /* we're in a page fault so some vma must be in the range */
+       BUG_ON(!vma);
+       BUG_ON(vma->vm_start >= _addr + PMD_SIZE);
+       offset = max(_addr, vma->vm_start) & ~PMD_MASK;
+       VM_BUG_ON(offset >= PMD_SIZE);
+       orig_pte = pte = pte_offset_map_lock(mm, pmdp, _addr, &ptl);
+       pte += offset >> PAGE_SHIFT;
+       for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
+               pte_t pteval = *pte;
+               struct page *page;
+               int curr_nid = local_nid;
+               int target_nid;
+               bool migrated;
+               if (!pte_present(pteval))
+                       continue;
+               if (!pte_numa(pteval))
+                       continue;
+               if (addr >= vma->vm_end) {
+                       vma = find_vma(mm, addr);
+                       /* there's a pte present so there must be a vma */
+                       BUG_ON(!vma);
+                       BUG_ON(addr < vma->vm_start);
+               }
+               if (pte_numa(pteval)) {
+                       pteval = pte_mknonnuma(pteval);
+                       set_pte_at(mm, addr, pte, pteval);
+               }
+               page = vm_normal_page(vma, addr, pteval);
+               if (unlikely(!page))
+                       continue;
+               /* only check non-shared pages */
+               if (unlikely(page_mapcount(page) != 1))
+                       continue;
+
+               /*
+                * Note that the NUMA fault is later accounted to either
+                * the node that is currently running or where the page is
+                * migrated to.
+                */
+               curr_nid = local_nid;
+               target_nid = numa_migrate_prep(page, vma, addr,
+                                              page_to_nid(page));
+               if (target_nid == -1) {
+                       put_page(page);
+                       continue;
+               }
+
+               /* Migrate to the requested node */
+               pte_unmap_unlock(pte, ptl);
+               migrated = migrate_misplaced_page(page, target_nid);
+               if (migrated)
+                       curr_nid = target_nid;
+               task_numa_fault(curr_nid, 1, migrated);
+
+               pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+       }
+       pte_unmap_unlock(orig_pte, ptl);
+
+       return 0;
+}
+#else
+static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+                    unsigned long addr, pmd_t *pmdp)
+{
+       BUG();
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3450,6 +3631,9 @@ int handle_pte_fault(struct mm_struct *mm,
                                        pte, pmd, flags, entry);
        }
 
+       if (pte_numa(entry))
+               return do_numa_page(mm, vma, address, entry, pte, pmd);
+
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (unlikely(!pte_same(*pte, entry)))
@@ -3520,8 +3704,11 @@ retry:
                if (pmd_trans_huge(orig_pmd)) {
                        unsigned int dirty = flags & FAULT_FLAG_WRITE;
 
-                       if (dirty && !pmd_write(orig_pmd) &&
-                           !pmd_trans_splitting(orig_pmd)) {
+                       if (pmd_numa(orig_pmd))
+                               return do_huge_pmd_numa_page(mm, vma, address,
+                                                            orig_pmd, pmd);
+
+                       if (dirty && !pmd_write(orig_pmd)) {
                                ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
                                                          orig_pmd);
                                /*
@@ -3536,16 +3723,21 @@ retry:
                                huge_pmd_set_accessed(mm, vma, address, pmd,
                                                      orig_pmd, dirty);
                        }
+
                        return 0;
                }
        }
 
+       if (pmd_numa(*pmd))
+               return do_pmd_numa_page(mm, vma, address, pmd);
+
        /*
         * Use __pte_alloc instead of pte_alloc_map, because we can't
         * run pte_offset_map on the pmd, if an huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if an huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
index 518baa8..962e353 100644 (file)
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1055,7 +1055,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                 * migrate_pages returns # of failed pages.
                 */
                ret = migrate_pages(&source, alloc_migrate_target, 0,
-                                                       true, MIGRATE_SYNC);
+                                                       true, MIGRATE_SYNC,
+                                                       MR_MEMORY_HOTPLUG);
                if (ret)
                        putback_lru_pages(&source);
        }
index aaf5456..d1b315e 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -90,6 +90,7 @@
 #include <linux/syscalls.h>
 #include <linux/ctype.h>
 #include <linux/mm_inline.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -117,6 +118,26 @@ static struct mempolicy default_policy = {
        .flags = MPOL_F_LOCAL,
 };
 
+static struct mempolicy preferred_node_policy[MAX_NUMNODES];
+
+static struct mempolicy *get_task_policy(struct task_struct *p)
+{
+       struct mempolicy *pol = p->mempolicy;
+       int node;
+
+       if (!pol) {
+               node = numa_node_id();
+               if (node != -1)
+                       pol = &preferred_node_policy[node];
+
+               /* preferred_node_policy is not initialised early in boot */
+               if (!pol->mode)
+                       pol = NULL;
+       }
+
+       return pol;
+}
+
 static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        /*
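get_task_policy() is the hook that makes NUMA balancing apply to tasks with no explicit mempolicy: they now fall back to a per-node MPOL_PREFERRED policy carrying MPOL_F_MOF|MPOL_F_MORON (initialised in numa_policy_init() further down), which is exactly the flag combination mpol_misplaced() later uses to migrate pages towards the node of the referencing CPU.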
@@ -254,7 +275,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
-               return NULL;    /* simply delete any existing policy */
+               return NULL;
        }
        VM_BUG_ON(!nodes);
 
@@ -269,6 +290,10 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                }
+       } else if (mode == MPOL_LOCAL) {
+               if (!nodes_empty(*nodes))
+                       return ERR_PTR(-EINVAL);
+               mode = MPOL_PREFERRED;
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -561,6 +586,36 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
        return 0;
 }
 
+#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
+/*
+ * This is used to mark a range of virtual addresses to be inaccessible.
+ * These are later cleared by a NUMA hinting fault. Depending on these
+ * faults, pages may be migrated for better NUMA placement.
+ *
+ * This is assuming that NUMA faults are handled using PROT_NONE. If
+ * an architecture makes a different choice, it will need further
+ * changes to the core.
+ */
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+                       unsigned long addr, unsigned long end)
+{
+       int nr_updated;
+       BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
+
+       nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
+       if (nr_updated)
+               count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+
+       return nr_updated;
+}
+#else
+static unsigned long change_prot_numa(struct vm_area_struct *vma,
+                       unsigned long addr, unsigned long end)
+{
+       return 0;
+}
+#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
+
 /*
  * Check if all pages in a range are on a set of nodes.
  * If pagelist != NULL then isolate pages from the LRU and
@@ -579,22 +634,32 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                return ERR_PTR(-EFAULT);
        prev = NULL;
        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+               unsigned long endvma = vma->vm_end;
+
+               if (endvma > end)
+                       endvma = end;
+               if (vma->vm_start > start)
+                       start = vma->vm_start;
+
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return ERR_PTR(-EFAULT);
                        if (prev && prev->vm_end < vma->vm_start)
                                return ERR_PTR(-EFAULT);
                }
-               if (!is_vm_hugetlb_page(vma) &&
-                   ((flags & MPOL_MF_STRICT) ||
+
+               if (is_vm_hugetlb_page(vma))
+                       goto next;
+
+               if (flags & MPOL_MF_LAZY) {
+                       change_prot_numa(vma, start, endvma);
+                       goto next;
+               }
+
+               if ((flags & MPOL_MF_STRICT) ||
                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-                               vma_migratable(vma)))) {
-                       unsigned long endvma = vma->vm_end;
+                     vma_migratable(vma))) {
 
-                       if (endvma > end)
-                               endvma = end;
-                       if (vma->vm_start > start)
-                               start = vma->vm_start;
                        err = check_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
                        if (err) {
@@ -602,6 +667,7 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                                break;
                        }
                }
+next:
                prev = vma;
        }
        return first;
@@ -961,7 +1027,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_node_page, dest,
-                                                       false, MIGRATE_SYNC);
+                                                       false, MIGRATE_SYNC,
+                                                       MR_SYSCALL);
                if (err)
                        putback_lru_pages(&pagelist);
        }
@@ -1133,8 +1200,7 @@ static long do_mbind(unsigned long start, unsigned long len,
        int err;
        LIST_HEAD(pagelist);
 
-       if (flags & ~(unsigned long)(MPOL_MF_STRICT |
-                                    MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & ~(unsigned long)MPOL_MF_VALID)
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;
@@ -1157,6 +1223,9 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (IS_ERR(new))
                return PTR_ERR(new);
 
+       if (flags & MPOL_MF_LAZY)
+               new->flags |= MPOL_F_MOF;
+
        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
@@ -1193,21 +1262,24 @@ static long do_mbind(unsigned long start, unsigned long len,
        vma = check_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
 
-       err = PTR_ERR(vma);
-       if (!IS_ERR(vma)) {
-               int nr_failed = 0;
-
+       err = PTR_ERR(vma);     /* maybe ... */
+       if (!IS_ERR(vma))
                err = mbind_range(mm, start, end, new);
 
+       if (!err) {
+               int nr_failed = 0;
+
                if (!list_empty(&pagelist)) {
+                       WARN_ON_ONCE(flags & MPOL_MF_LAZY);
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma,
-                                               false, MIGRATE_SYNC);
+                                               false, MIGRATE_SYNC,
+                                               MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_lru_pages(&pagelist);
                }
 
-               if (!err && nr_failed && (flags & MPOL_MF_STRICT))
+               if (nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        } else
                putback_lru_pages(&pagelist);
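Taken together with the check_range() changes, MPOL_MF_LAZY is now a userspace-visible mbind(2) flag: rather than migrating synchronously, the kernel calls change_prot_numa() on the range and lets NUMA hinting faults place the pages later. A hedged sketch (MPOL_MF_LAZY exists only with this series applied; the fallback define uses the value from the series' uapi header, and the nodemask is illustrative):

#include <numaif.h>             /* mbind(); link with -lnuma */
#include <stdlib.h>

#ifndef MPOL_MF_LAZY
#define MPOL_MF_LAZY    (1 << 3)        /* per this series' uapi header */
#endif

int main(void)
{
        size_t len = 64UL << 20;                /* 64MB */
        void *buf = aligned_alloc(1UL << 21, len);
        unsigned long nodemask = 0x3;           /* nodes 0-1, illustrative */

        if (!buf)
                return 1;
        /* Mark the range for lazy NUMA-hinting migration; MPOL_MF_LAZY
         * sets MPOL_F_MOF on the new policy (see do_mbind() above). */
        if (mbind(buf, len, MPOL_BIND, &nodemask,
                  8 * sizeof(nodemask), MPOL_MF_LAZY))
                return 1;
        return 0;
}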
@@ -1546,7 +1618,7 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
                struct vm_area_struct *vma, unsigned long addr)
 {
-       struct mempolicy *pol = task->mempolicy;
+       struct mempolicy *pol = get_task_policy(task);
 
        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -1956,7 +2028,7 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-       struct mempolicy *pol = current->mempolicy;
+       struct mempolicy *pol = get_task_policy(current);
        struct page *page;
        unsigned int cpuset_mems_cookie;
 
@@ -2140,6 +2212,115 @@ static void sp_free(struct sp_node *n)
        kmem_cache_free(sn_cache, n);
 }
 
+/**
+ * mpol_misplaced - check whether current page node is valid in policy
+ *
+ * @page: page to be checked
+ * @vma: vm area where page mapped
+ * @addr: virtual address where page mapped
+ *
+ * Look up the current policy node id for vma,addr and compare it to
+ * the page's node id.
+ *
+ * Returns:
+ *     -1      - not misplaced, page is in the right node
+ *     node    - node id where the page should be
+ *
+ * Policy determination "mimics" alloc_page_vma().
+ * Called from fault path where we know the vma and faulting address.
+ */
+int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
+{
+       struct mempolicy *pol;
+       struct zone *zone;
+       int curnid = page_to_nid(page);
+       unsigned long pgoff;
+       int polnid = -1;
+       int ret = -1;
+
+       BUG_ON(!vma);
+
+       pol = get_vma_policy(current, vma, addr);
+       if (!(pol->flags & MPOL_F_MOF))
+               goto out;
+
+       switch (pol->mode) {
+       case MPOL_INTERLEAVE:
+               BUG_ON(addr >= vma->vm_end);
+               BUG_ON(addr < vma->vm_start);
+
+               pgoff = vma->vm_pgoff;
+               pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
+               polnid = offset_il_node(pol, vma, pgoff);
+               break;
+
+       case MPOL_PREFERRED:
+               if (pol->flags & MPOL_F_LOCAL)
+                       polnid = numa_node_id();
+               else
+                       polnid = pol->v.preferred_node;
+               break;
+
+       case MPOL_BIND:
+               /*
+                * allows binding to multiple nodes.
+                * use current page if in policy nodemask,
+                * else select nearest allowed node, if any.
+                * If no allowed nodes, use current [!misplaced].
+                */
+               if (node_isset(curnid, pol->v.nodes))
+                       goto out;
+               (void)first_zones_zonelist(
+                               node_zonelist(numa_node_id(), GFP_HIGHUSER),
+                               gfp_zone(GFP_HIGHUSER),
+                               &pol->v.nodes, &zone);
+               polnid = zone->node;
+               break;
+
+       default:
+               BUG();
+       }
+
+       /* Migrate the page towards the node whose CPU is referencing it */
+       if (pol->flags & MPOL_F_MORON) {
+               int last_nid;
+
+               polnid = numa_node_id();
+
+               /*
+                * Multi-stage node selection is used in conjunction
+                * with a periodic migration fault to build a temporal
+                * task<->page relation. By using a two-stage filter we
+                * remove short/unlikely relations.
+                *
+                * Using P(p) ~ n_p / n_t as per frequentist
+                * probability, we can equate a task's usage of a
+                * particular page (n_p) per total usage of this
+                * page (n_t) (in a given time-span) to a probability.
+                *
+                * Our periodic faults will sample this probability and
+                * getting the same result twice in a row, given these
+                * samples are fully independent, is then given by
+                * P(n)^2, provided our sample period is sufficiently
+                * short compared to the usage pattern.
+                *
+                * This quadric squishes small probabilities, making
+                * it less likely we act on an unlikely task<->page
+                * relation.
+                */
+               last_nid = page_xchg_last_nid(page, polnid);
+               if (last_nid != polnid)
+                       goto out;
+       }
+
+       if (curnid != polnid)
+               ret = polnid;
+out:
+       mpol_cond_put(pol);
+
+       return ret;
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
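To put numbers on the two-stage filter above: if a task generates a fraction p of the hinting faults against a page, the chance that two consecutive independent samples both name that task's node is p^2, so a weak p = 0.3 relation acts only ~9% of the time while a strong p = 0.9 relation still acts ~81% of the time; page_xchg_last_nid() stores the first sample that the second is compared against.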
@@ -2305,6 +2486,50 @@ void mpol_free_shared_policy(struct shared_policy *p)
        mutex_unlock(&p->mutex);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+static bool __initdata numabalancing_override;
+
+static void __init check_numabalancing_enable(void)
+{
+       bool numabalancing_default = false;
+
+       if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
+               numabalancing_default = true;
+
+       if (nr_node_ids > 1 && !numabalancing_override) {
+               printk(KERN_INFO "%s automatic NUMA balancing. "
+                       "Configure with numa_balancing= or sysctl\n",
+                       numabalancing_default ? "Enabling" : "Disabling");
+               set_numabalancing_state(numabalancing_default);
+       }
+}
+
+static int __init setup_numabalancing(char *str)
+{
+       int ret = 0;
+       if (!str)
+               goto out;
+       numabalancing_override = true;
+
+       if (!strcmp(str, "enable")) {
+               set_numabalancing_state(true);
+               ret = 1;
+       } else if (!strcmp(str, "disable")) {
+               set_numabalancing_state(false);
+               ret = 1;
+       }
+out:
+       if (!ret)
+               printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+
+       return ret;
+}
+__setup("numa_balancing=", setup_numabalancing);
+#else
+static inline void __init check_numabalancing_enable(void)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /* assumes fs == KERNEL_DS */
 void __init numa_policy_init(void)
 {
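For reference, setup_numabalancing() accepts exactly the two literals parsed above: booting with numa_balancing=disable pins the feature off and numa_balancing=enable pins it on. Anything else trips the "Unable to parse" warning and, because numabalancing_override is set before the string is matched, also suppresses the automatic enabling in check_numabalancing_enable().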
@@ -2320,6 +2545,15 @@ void __init numa_policy_init(void)
                                     sizeof(struct sp_node),
                                     0, SLAB_PANIC, NULL);
 
+       for_each_node(nid) {
+               preferred_node_policy[nid] = (struct mempolicy) {
+                       .refcnt = ATOMIC_INIT(1),
+                       .mode = MPOL_PREFERRED,
+                       .flags = MPOL_F_MOF | MPOL_F_MORON,
+                       .v = { .preferred_node = nid, },
+               };
+       }
+
        /*
         * Set interleaving policy for system init. Interleaving is only
         * enabled across suitably sized nodes (default is >= 16MB), or
@@ -2346,6 +2580,8 @@ void __init numa_policy_init(void)
 
        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
                printk("numa_policy_init: interleaving failed\n");
+
+       check_numabalancing_enable();
 }
 
 /* Reset policy of current process to default */
@@ -2362,14 +2598,13 @@ void numa_default_policy(void)
  * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
  * Used only for mpol_parse_str() and mpol_to_str()
  */
-#define MPOL_LOCAL MPOL_MAX
 static const char * const policy_modes[] =
 {
        [MPOL_DEFAULT]    = "default",
        [MPOL_PREFERRED]  = "prefer",
        [MPOL_BIND]       = "bind",
        [MPOL_INTERLEAVE] = "interleave",
-       [MPOL_LOCAL]      = "local"
+       [MPOL_LOCAL]      = "local",
 };
 
 
@@ -2415,12 +2650,12 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
        if (flags)
                *flags++ = '\0';        /* terminate mode string */
 
-       for (mode = 0; mode <= MPOL_LOCAL; mode++) {
+       for (mode = 0; mode < MPOL_MAX; mode++) {
                if (!strcmp(str, policy_modes[mode])) {
                        break;
                }
        }
-       if (mode > MPOL_LOCAL)
+       if (mode >= MPOL_MAX)
                goto out;
 
        switch (mode) {
index cae0271..32efd80 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,9 @@
 
 #include <asm/tlbflush.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/migrate.h>
+
 #include "internal.h"
 
 /*
@@ -293,7 +296,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page,
                struct buffer_head *head, enum migrate_mode mode)
 {
-       int expected_count;
+       int expected_count = 0;
        void **pslot;
 
        if (!mapping) {
@@ -421,7 +424,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
  */
 void migrate_page_copy(struct page *newpage, struct page *page)
 {
-       if (PageHuge(page))
+       if (PageHuge(page) || PageTransHuge(page))
                copy_huge_page(newpage, page);
        else
                copy_highpage(newpage, page);
@@ -765,7 +768,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
         */
        if (PageAnon(page)) {
                /*
-                * Only page_lock_anon_vma() understands the subtleties of
+                * Only page_lock_anon_vma_read() understands the subtleties of
                 * getting a hold on an anon_vma from outside one of its mms.
                 */
                anon_vma = page_get_anon_vma(page);
@@ -998,10 +1001,11 @@ out:
  */
 int migrate_pages(struct list_head *from,
                new_page_t get_new_page, unsigned long private, bool offlining,
-               enum migrate_mode mode)
+               enum migrate_mode mode, int reason)
 {
        int retry = 1;
        int nr_failed = 0;
+       int nr_succeeded = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
@@ -1028,6 +1032,7 @@ int migrate_pages(struct list_head *from,
                                retry++;
                                break;
                        case MIGRATEPAGE_SUCCESS:
+                               nr_succeeded++;
                                break;
                        default:
                                /* Permanent failure */
@@ -1038,6 +1043,12 @@ int migrate_pages(struct list_head *from,
        }
        rc = nr_failed + retry;
 out:
+       if (nr_succeeded)
+               count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+       if (nr_failed)
+               count_vm_events(PGMIGRATE_FAIL, nr_failed);
+       trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
+
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
 
@@ -1176,7 +1187,8 @@ set_status:
        err = 0;
        if (!list_empty(&pagelist)) {
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm, 0, MIGRATE_SYNC);
+                               (unsigned long)pm, 0, MIGRATE_SYNC,
+                               MR_SYSCALL);
                if (err)
                        putback_lru_pages(&pagelist);
        }
@@ -1440,4 +1452,317 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        }
        return err;
 }
-#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * Returns true if this is a safe migration target node for misplaced NUMA
+ * pages. Currently it only checks the watermarks, which is crude.
+ */
+static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
+                                  int nr_migrate_pages)
+{
+       int z;
+       for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+               struct zone *zone = pgdat->node_zones + z;
+
+               if (!populated_zone(zone))
+                       continue;
+
+               if (zone->all_unreclaimable)
+                       continue;
+
+               /* Avoid waking kswapd by allocating pages_to_migrate pages. */
+               if (!zone_watermark_ok(zone, 0,
+                                      high_wmark_pages(zone) +
+                                      nr_migrate_pages,
+                                      0, 0))
+                       continue;
+               return true;
+       }
+       return false;
+}
+
+static struct page *alloc_misplaced_dst_page(struct page *page,
+                                          unsigned long data,
+                                          int **result)
+{
+       int nid = (int) data;
+       struct page *newpage;
+
+       newpage = alloc_pages_exact_node(nid,
+                                        (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
+                                         __GFP_NOMEMALLOC | __GFP_NORETRY |
+                                         __GFP_NOWARN) &
+                                        ~GFP_IOFS, 0);
+       if (newpage)
+               page_xchg_last_nid(newpage, page_last_nid(page));
+
+       return newpage;
+}
+
+/*
+ * Page migration rate limiting control.
+ * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
+ * window of time. Default here says do not migrate more than 1280M per second.
+ * If a node is rate-limited then PTE NUMA updates are also rate-limited.
+ * However, as it is faults that reset the window, pte updates will happen
+ * unconditionally if there has not been a fault since
+ * @pteupdate_interval_millisecs after the throttle window closed.
+ */
+static unsigned int migrate_interval_millisecs __read_mostly = 100;
+static unsigned int pteupdate_interval_millisecs __read_mostly = 1000;
+static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
+
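+/*
+ * Worked numbers for the defaults above (hedged; assumes 4K base pages):
+ * ratelimit_pages = 128 << (20 - 12) = 32768 pages = 128MB per window,
+ * and one window is migrate_interval_millisecs = 100ms, so the cap is
+ * 128MB / 0.1s = 1280MB/s, matching the figure quoted in the comment.
+ */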
+/* Returns true if NUMA migration is currently rate limited */
+bool migrate_ratelimited(int node)
+{
+       pg_data_t *pgdat = NODE_DATA(node);
+
+       if (time_after(jiffies, pgdat->numabalancing_migrate_next_window +
+                               msecs_to_jiffies(pteupdate_interval_millisecs)))
+               return false;
+
+       if (pgdat->numabalancing_migrate_nr_pages < ratelimit_pages)
+               return false;
+
+       return true;
+}
+
+/* Returns true if the node is migrate rate-limited after the update */
+bool numamigrate_update_ratelimit(pg_data_t *pgdat, unsigned long nr_pages)
+{
+       bool rate_limited = false;
+
+       /*
+        * Rate-limit the amount of data that is being migrated to a node.
+        * Optimal placement is no good if the memory bus is saturated and
+        * all the time is being spent migrating!
+        */
+       spin_lock(&pgdat->numabalancing_migrate_lock);
+       if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
+               pgdat->numabalancing_migrate_nr_pages = 0;
+               pgdat->numabalancing_migrate_next_window = jiffies +
+                       msecs_to_jiffies(migrate_interval_millisecs);
+       }
+       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
+               rate_limited = true;
+       else
+               pgdat->numabalancing_migrate_nr_pages += nr_pages;
+       spin_unlock(&pgdat->numabalancing_migrate_lock);
+
+       return rate_limited;
+}
+
+int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+{
+       int ret = 0;
+
+       /* Avoid migrating to a node that is nearly full */
+       if (migrate_balanced_pgdat(pgdat, 1)) {
+               int page_lru;
+
+               if (isolate_lru_page(page)) {
+                       put_page(page);
+                       return 0;
+               }
+
+               /* Page is isolated */
+               ret = 1;
+               page_lru = page_is_file_cache(page);
+               if (!PageTransHuge(page))
+                       inc_zone_page_state(page, NR_ISOLATED_ANON + page_lru);
+               else
+                       mod_zone_page_state(page_zone(page),
+                                       NR_ISOLATED_ANON + page_lru,
+                                       HPAGE_PMD_NR);
+       }
+
+       /*
+        * Page is either isolated or there is not enough space on the target
+        * node. If isolated, then it has taken a reference count and the
+        * callers reference can be safely dropped without the page
+        * disappearing underneath us during migration. Otherwise the page is
+        * not to be migrated but the callers reference should still be
+        * dropped so it does not leak.
+        */
+       put_page(page);
+
+       return ret;
+}
+
+/*
+ * Attempt to migrate a misplaced page to the specified destination
+ * node. Caller is expected to have an elevated reference count on
+ * the page that will be dropped by this function before returning.
+ */
+int migrate_misplaced_page(struct page *page, int node)
+{
+       pg_data_t *pgdat = NODE_DATA(node);
+       int isolated = 0;
+       int nr_remaining;
+       LIST_HEAD(migratepages);
+
+       /*
+        * Don't migrate pages that are mapped in multiple processes.
+        * TODO: Handle false sharing detection instead of this hammer
+        */
+       if (page_mapcount(page) != 1) {
+               put_page(page);
+               goto out;
+       }
+
+       /*
+        * Rate-limit the amount of data that is being migrated to a node.
+        * Optimal placement is no good if the memory bus is saturated and
+        * all the time is being spent migrating!
+        */
+       if (numamigrate_update_ratelimit(pgdat, 1)) {
+               put_page(page);
+               goto out;
+       }
+
+       isolated = numamigrate_isolate_page(pgdat, page);
+       if (!isolated)
+               goto out;
+
+       list_add(&page->lru, &migratepages);
+       nr_remaining = migrate_pages(&migratepages,
+                       alloc_misplaced_dst_page,
+                       node, false, MIGRATE_ASYNC,
+                       MR_NUMA_MISPLACED);
+       if (nr_remaining) {
+               putback_lru_pages(&migratepages);
+               isolated = 0;
+       } else
+               count_vm_numa_event(NUMA_PAGE_MIGRATE);
+       BUG_ON(!list_empty(&migratepages));
+out:
+       return isolated;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+                               struct vm_area_struct *vma,
+                               pmd_t *pmd, pmd_t entry,
+                               unsigned long address,
+                               struct page *page, int node)
+{
+       unsigned long haddr = address & HPAGE_PMD_MASK;
+       pg_data_t *pgdat = NODE_DATA(node);
+       int isolated = 0;
+       struct page *new_page = NULL;
+       struct mem_cgroup *memcg = NULL;
+       int page_lru = page_is_file_cache(page);
+
+       /*
+        * Don't migrate pages that are mapped in multiple processes.
+        * TODO: Handle false sharing detection instead of this hammer
+        */
+       if (page_mapcount(page) != 1)
+               goto out_dropref;
+
+       /*
+        * Rate-limit the amount of data that is being migrated to a node.
+        * Optimal placement is no good if the memory bus is saturated and
+        * all the time is being spent migrating!
+        */
+       if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
+               goto out_dropref;
+
+       new_page = alloc_pages_node(node,
+               (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+       if (!new_page) {
+               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+               goto out_dropref;
+       }
+       page_xchg_last_nid(new_page, page_last_nid(page));
+
+       isolated = numamigrate_isolate_page(pgdat, page);
+       if (!isolated) {
+               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+               put_page(new_page);
+               goto out_keep_locked;
+       }
+
+       /* Prepare a page as a migration target */
+       __set_page_locked(new_page);
+       SetPageSwapBacked(new_page);
+
+       /* anon mapping, we can simply copy page->mapping to the new page: */
+       new_page->mapping = page->mapping;
+       new_page->index = page->index;
+       migrate_page_copy(new_page, page);
+       WARN_ON(PageLRU(new_page));
+
+       /* Recheck the target PMD */
+       spin_lock(&mm->page_table_lock);
+       if (unlikely(!pmd_same(*pmd, entry))) {
+               spin_unlock(&mm->page_table_lock);
+
+               /* Reverse changes made by migrate_page_copy() */
+               if (TestClearPageActive(new_page))
+                       SetPageActive(page);
+               if (TestClearPageUnevictable(new_page))
+                       SetPageUnevictable(page);
+               mlock_migrate_page(page, new_page);
+
+               unlock_page(new_page);
+               put_page(new_page);             /* Free it */
+
+               unlock_page(page);
+               putback_lru_page(page);
+
+               count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
+               goto out;
+       }
+
+       /*
+        * Traditional migration needs to prepare the memcg charge
+        * transaction early to prevent the old page from being
+        * uncharged when installing migration entries.  Here we can
+        * save the potential rollback and start the charge transfer
+        * only when migration is already known to end successfully.
+        */
+       mem_cgroup_prepare_migration(page, new_page, &memcg);
+
+       entry = mk_pmd(new_page, vma->vm_page_prot);
+       entry = pmd_mknonnuma(entry);
+       entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
+       entry = pmd_mkhuge(entry);
+
+       page_add_new_anon_rmap(new_page, vma, haddr);
+
+       set_pmd_at(mm, haddr, pmd, entry);
+       update_mmu_cache_pmd(vma, address, entry);
+       page_remove_rmap(page);
+       /*
+        * Finish the charge transaction under the page table lock to
+        * prevent split_huge_page() from dividing up the charge
+        * before it's fully transferred to the new page.
+        */
+       mem_cgroup_end_migration(memcg, page, new_page, true);
+       spin_unlock(&mm->page_table_lock);
+
+       unlock_page(new_page);
+       unlock_page(page);
+       put_page(page);                 /* Drop the rmap reference */
+       put_page(page);                 /* Drop the LRU isolation reference */
+
+       count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
+       count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
+
+out:
+       mod_zone_page_state(page_zone(page),
+                       NR_ISOLATED_ANON + page_lru,
+                       -HPAGE_PMD_NR);
+       return isolated;
+
+out_dropref:
+       put_page(page);
+out_keep_locked:
+       return 0;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#endif /* CONFIG_NUMA */
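
For reference, the window scheme implemented by numamigrate_update_ratelimit() above can be modelled outside the kernel. A minimal userspace sketch, assuming millisecond timestamps in place of jiffies and a single caller in place of the pgdat spinlock; the struct, function names, and demo budget are illustrative, not kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Stand-ins for the per-node fields added to pg_data_t above. */
struct node_state {
	unsigned long window_end_ms;	/* numabalancing_migrate_next_window */
	unsigned long nr_pages;		/* numabalancing_migrate_nr_pages */
};

static unsigned long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000UL + ts.tv_nsec / 1000000UL;
}

/* Open a fresh window once the old one expires, then refuse work past
 * the per-window budget -- the same shape as numamigrate_update_ratelimit(). */
static bool update_ratelimit(struct node_state *ns, unsigned long nr_pages,
			     unsigned long interval_ms, unsigned long budget)
{
	if (now_ms() > ns->window_end_ms) {
		ns->nr_pages = 0;
		ns->window_end_ms = now_ms() + interval_ms;
	}
	if (ns->nr_pages > budget)
		return true;	/* rate limited: caller skips this batch */
	ns->nr_pages += nr_pages;
	return false;
}

int main(void)
{
	struct node_state ns = { 0, 0 };

	for (int i = 0; i < 6; i++)
		printf("batch %d limited=%d\n", i,
		       update_ratelimit(&ns, 16384, 100, 32768));
	return 0;
}

As in the kernel version, the `>` test means a window can overshoot the budget by one batch; only the batch submitted after the budget has been exceeded is refused.
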
index 2b7d9e7..f54b235 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -736,7 +736,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
        if (anon_vma) {
                VM_BUG_ON(adjust_next && next->anon_vma &&
                          anon_vma != next->anon_vma);
-               anon_vma_lock(anon_vma);
+               anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_pre_update_vma(vma);
                if (adjust_next)
                        anon_vma_interval_tree_pre_update_vma(next);
@@ -2886,15 +2886,15 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
+               down_write(&anon_vma->root->rwsem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->root->mutex. If some other vma in this mm shares
+                * anon_vma->root->rwsem. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->root->mutex.
+                * anon_vma->root->rwsem.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
                                       &anon_vma->root->rb_root.rb_node))
@@ -2996,7 +2996,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->root->mutex.
+                * anon_vma->root->rwsem.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->root->rb_root.rb_node))
index e8c3938..3dca970 100644 (file)
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -35,12 +35,16 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 }
 #endif
 
-static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
+static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               int dirty_accountable, int prot_numa, bool *ret_all_same_node)
 {
+       struct mm_struct *mm = vma->vm_mm;
        pte_t *pte, oldpte;
        spinlock_t *ptl;
+       unsigned long pages = 0;
+       bool all_same_node = true;
+       int last_nid = -1;
 
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
@@ -48,17 +52,43 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                oldpte = *pte;
                if (pte_present(oldpte)) {
                        pte_t ptent;
+                       bool updated = false;
 
                        ptent = ptep_modify_prot_start(mm, addr, pte);
-                       ptent = pte_modify(ptent, newprot);
+                       if (!prot_numa) {
+                               ptent = pte_modify(ptent, newprot);
+                               updated = true;
+                       } else {
+                               struct page *page;
+
+                               page = vm_normal_page(vma, addr, oldpte);
+                               if (page) {
+                                       int this_nid = page_to_nid(page);
+                                       if (last_nid == -1)
+                                               last_nid = this_nid;
+                                       if (last_nid != this_nid)
+                                               all_same_node = false;
+
+                                       /* only check non-shared pages */
+                                       if (!pte_numa(oldpte) &&
+                                           page_mapcount(page) == 1) {
+                                               ptent = pte_mknuma(ptent);
+                                               updated = true;
+                                       }
+                               }
+                       }
 
                        /*
                         * Avoid taking write faults for pages we know to be
                         * dirty.
                         */
-                       if (dirty_accountable && pte_dirty(ptent))
+                       if (dirty_accountable && pte_dirty(ptent)) {
                                ptent = pte_mkwrite(ptent);
+                               updated = true;
+                       }
 
+                       if (updated)
+                               pages++;
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -72,18 +102,40 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                                set_pte_at(mm, addr, pte,
                                        swp_entry_to_pte(entry));
                        }
+                       pages++;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(pte - 1, ptl);
+
+       *ret_all_same_node = all_same_node;
+       return pages;
 }
 
-static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+#ifdef CONFIG_NUMA_BALANCING
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+               pmd_t *pmd)
+{
+       spin_lock(&mm->page_table_lock);
+       set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
+       spin_unlock(&mm->page_table_lock);
+}
+#else
+static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
+               pmd_t *pmd)
+{
+       BUG();
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               int dirty_accountable, int prot_numa)
 {
        pmd_t *pmd;
        unsigned long next;
+       unsigned long pages = 0;
+       bool all_same_node;
 
        pmd = pmd_offset(pud, addr);
        do {
@@ -91,42 +143,59 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                if (pmd_trans_huge(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                split_huge_page_pmd(vma, addr, pmd);
-                       else if (change_huge_pmd(vma, pmd, addr, newprot))
+                       else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
+                               pages += HPAGE_PMD_NR;
                                continue;
+                       }
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
-                                dirty_accountable);
+               pages += change_pte_range(vma, pmd, addr, next, newprot,
+                                dirty_accountable, prot_numa, &all_same_node);
+
+               /*
+                * If we are changing protections for NUMA hinting faults then
+                * set pmd_numa if the examined pages were all on the same
+                * node. This allows a regular PMD to be handled as one fault
+                * and effectively batches the taking of the PTL
+                */
+               if (prot_numa && all_same_node)
+                       change_pmd_protnuma(vma->vm_mm, addr, pmd);
        } while (pmd++, addr = next, addr != end);
+
+       return pages;
 }
 
-static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               int dirty_accountable, int prot_numa)
 {
        pud_t *pud;
        unsigned long next;
+       unsigned long pages = 0;
 
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               change_pmd_range(vma, pud, addr, next, newprot,
-                                dirty_accountable);
+               pages += change_pmd_range(vma, pud, addr, next, newprot,
+                                dirty_accountable, prot_numa);
        } while (pud++, addr = next, addr != end);
+
+       return pages;
 }
 
-static void change_protection(struct vm_area_struct *vma,
+static unsigned long change_protection_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end, pgprot_t newprot,
-               int dirty_accountable)
+               int dirty_accountable, int prot_numa)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        unsigned long next;
        unsigned long start = addr;
+       unsigned long pages = 0;
 
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
@@ -135,10 +204,32 @@ static void change_protection(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               change_pud_range(vma, pgd, addr, next, newprot,
-                                dirty_accountable);
+               pages += change_pud_range(vma, pgd, addr, next, newprot,
+                                dirty_accountable, prot_numa);
        } while (pgd++, addr = next, addr != end);
-       flush_tlb_range(vma, start, end);
+
+       /* Only flush the TLB if we actually modified any entries: */
+       if (pages)
+               flush_tlb_range(vma, start, end);
+
+       return pages;
+}
+
+unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end, pgprot_t newprot,
+                      int dirty_accountable, int prot_numa)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       unsigned long pages;
+
+       mmu_notifier_invalidate_range_start(mm, start, end);
+       if (is_vm_hugetlb_page(vma))
+               pages = hugetlb_change_protection(vma, start, end, newprot);
+       else
+               pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+       mmu_notifier_invalidate_range_end(mm, start, end);
+
+       return pages;
 }
 
 int
@@ -213,12 +304,8 @@ success:
                dirty_accountable = 1;
        }
 
-       mmu_notifier_invalidate_range_start(mm, start, end);
-       if (is_vm_hugetlb_page(vma))
-               hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
-       else
-               change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);
+
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
        perf_event_mmap(vma);
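
The mprotect.c changes above make change_protection() return how many entries were actually rewritten, so the final flush_tlb_range() can be skipped when nothing changed; this matters most for prot_numa passes over ranges whose PTEs are already marked. A standalone sketch of that counting pattern over a toy PTE array (PTE_PRESENT and PTE_NUMA are illustrative bit values, not any architecture's):

#include <stdio.h>

#define PTE_PRESENT 0x1u
#define PTE_NUMA    0x2u	/* stand-in for _PAGE_NUMA */

/* Mark present, not-yet-marked entries for NUMA hinting faults and
 * report how many entries actually changed, as change_pte_range()
 * now does via its return value. */
static unsigned long mark_prot_numa(unsigned int *pte, unsigned int n)
{
	unsigned long pages = 0;

	for (unsigned int i = 0; i < n; i++) {
		if ((pte[i] & PTE_PRESENT) && !(pte[i] & PTE_NUMA)) {
			pte[i] |= PTE_NUMA;
			pages++;
		}
	}
	return pages;
}

int main(void)
{
	unsigned int ptes[8] = { 1, 1, 0, 3, 1, 0, 1, 3 };
	unsigned long updated = mark_prot_numa(ptes, 8);

	/* Only flush the TLB if we actually modified any entries: */
	if (updated)
		printf("flush_tlb_range() for %lu rewritten entries\n", updated);
	else
		printf("no flush needed\n");
	return 0;
}
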
index eabb24d..e1031e1 100644 (file)
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                }
                if (vma->anon_vma) {
                        anon_vma = vma->anon_vma;
-                       anon_vma_lock(anon_vma);
+                       anon_vma_lock_write(anon_vma);
                }
        }
 
index 83637df..d037c8b 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -611,6 +611,7 @@ static inline int free_pages_check(struct page *page)
                bad_page(page);
                return 1;
        }
+       reset_page_last_nid(page);
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
@@ -3883,6 +3884,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                mminit_verify_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
+               reset_page_last_nid(page);
                SetPageReserved(page);
                /*
                 * Mark the block movable so that blocks are reserved for
@@ -4526,6 +4528,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        int ret;
 
        pgdat_resize_init(pgdat);
+#ifdef CONFIG_NUMA_BALANCING
+       spin_lock_init(&pgdat->numabalancing_migrate_lock);
+       pgdat->numabalancing_migrate_nr_pages = 0;
+       pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
        pgdat_page_cgroup_init(pgdat);
@@ -5800,7 +5807,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 
                ret = migrate_pages(&cc->migratepages,
                                    alloc_migrate_target,
-                                   0, false, MIGRATE_SYNC);
+                                   0, false, MIGRATE_SYNC,
+                                   MR_CMA);
        }
 
        putback_movable_pages(&cc->migratepages);
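
page_alloc.c now calls reset_page_last_nid() when pages are freed and when the memmap is initialised, clearing the "node that last touched this page" field that NUMA balancing keeps packed into page->flags. A userspace model of that packing, assuming an illustrative 8-bit field at bit 8; the real width and position depend on the page-flags layout:

#include <stdio.h>

#define LAST_NID_SHIFT	8
#define LAST_NID_MASK	0xffUL

struct page { unsigned long flags; };

/* Models of page_last_nid()/page_xchg_last_nid(): a small node-id
 * field carved out of page->flags. */
static int page_last_nid(struct page *page)
{
	return (page->flags >> LAST_NID_SHIFT) & LAST_NID_MASK;
}

static int page_xchg_last_nid(struct page *page, int nid)
{
	int old = page_last_nid(page);

	page->flags &= ~(LAST_NID_MASK << LAST_NID_SHIFT);
	page->flags |= ((unsigned long)nid & LAST_NID_MASK) << LAST_NID_SHIFT;
	return old;
}

static void reset_page_last_nid(struct page *page)
{
	page_xchg_last_nid(page, LAST_NID_MASK);	/* "no node" sentinel */
}

int main(void)
{
	struct page page = { .flags = 0 };

	reset_page_last_nid(&page);
	printf("old nid %d\n", page_xchg_last_nid(&page, 1));
	printf("now nid %d\n", page_last_nid(&page));
	return 0;
}
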
index e642627..0c8323f 100644 (file)
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -12,8 +12,8 @@
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
- * Only sets the access flags (dirty, accessed, and
- * writable). Furthermore, we know it always gets set to a "more
+ * Only sets the access flags (dirty, accessed), as well as write 
+ * permission. Furthermore, we know it always gets set to a "more
  * permissive" setting, which allows most architectures to optimize
  * this. We return whether the PTE actually changed, which in turn
  * instructs the caller to do things like update__mmu_cache.  This
@@ -27,7 +27,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
-               flush_tlb_page(vma, address);
+               flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
 }
@@ -88,7 +88,8 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
 {
        pte_t pte;
        pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
-       flush_tlb_page(vma, address);
+       if (pte_accessible(pte))
+               flush_tlb_page(vma, address);
        return pte;
 }
 #endif
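
The pgtable-generic.c hunk above makes ptep_clear_flush() skip the TLB flush when the old PTE could never have been cached by the TLB, saving a cross-CPU shootdown. A toy model of the guard, with stand-in types rather than kernel ones:

#include <stdbool.h>
#include <stdio.h>

struct pte { bool present; unsigned long pfn; };

/* Model of pte_accessible(): only a present entry can sit in the TLB. */
static bool pte_accessible(struct pte pte)
{
	return pte.present;
}

/* Model of the patched ptep_clear_flush(): clear the entry first, then
 * flush only if the old value could have been cached by the TLB. */
static struct pte ptep_clear_flush(struct pte *ptep)
{
	struct pte old = *ptep;

	*ptep = (struct pte){ 0 };
	if (pte_accessible(old))
		printf("flush_tlb_page(pfn %lu)\n", old.pfn);
	return old;
}

int main(void)
{
	struct pte a = { true, 42 }, b = { false, 7 };

	ptep_clear_flush(&a);	/* flushes */
	ptep_clear_flush(&b);	/* skips the shootdown entirely */
	return 0;
}
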
index face808..2c78f8c 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,7 +24,7 @@
  *   mm->mmap_sem
  *     page->flags PG_locked (lock_page)
  *       mapping->i_mmap_mutex
- *         anon_vma->mutex
+ *         anon_vma->rwsem
  *           mm->page_table_lock or pte_lock
  *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
  *             swap_lock (in swap_duplicate, swap_info_get)
@@ -37,7 +37,7 @@
  *                           in arch-dependent flush_dcache_mmap_lock,
  *                           within bdi.wb->list_lock in __sync_single_inode)
  *
- * anon_vma->mutex,mapping->i_mutex      (memory_failure, collect_procs_anon)
+ * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
  */
@@ -87,24 +87,24 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
        VM_BUG_ON(atomic_read(&anon_vma->refcount));
 
        /*
-        * Synchronize against page_lock_anon_vma() such that
+        * Synchronize against page_lock_anon_vma_read() such that
         * we can safely hold the lock without the anon_vma getting
         * freed.
         *
         * Relies on the full mb implied by the atomic_dec_and_test() from
         * put_anon_vma() against the acquire barrier implied by
-        * mutex_trylock() from page_lock_anon_vma(). This orders:
+        * down_read_trylock() from page_lock_anon_vma_read(). This orders:
         *
-        * page_lock_anon_vma()         VS      put_anon_vma()
-        *   mutex_trylock()                      atomic_dec_and_test()
+        * page_lock_anon_vma_read()    VS      put_anon_vma()
+        *   down_read_trylock()                  atomic_dec_and_test()
         *   LOCK                                 MB
-        *   atomic_read()                        mutex_is_locked()
+        *   atomic_read()                        rwsem_is_locked()
         *
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
-       if (mutex_is_locked(&anon_vma->root->mutex)) {
-               anon_vma_lock(anon_vma);
+       if (rwsem_is_locked(&anon_vma->root->rwsem)) {
+               anon_vma_lock_write(anon_vma);
                anon_vma_unlock(anon_vma);
        }
 
@@ -146,7 +146,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
  * allocate a new one.
  *
  * Anon-vma allocations are very subtle, because we may have
- * optimistically looked up an anon_vma in page_lock_anon_vma()
+ * optimistically looked up an anon_vma in page_lock_anon_vma_read()
  * and that may actually touch the spinlock even in the newly
  * allocated vma (it depends on RCU to make sure that the
  * anon_vma isn't actually destroyed).
@@ -181,7 +181,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
                        allocated = anon_vma;
                }
 
-               anon_vma_lock(anon_vma);
+               anon_vma_lock_write(anon_vma);
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
@@ -219,9 +219,9 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct
        struct anon_vma *new_root = anon_vma->root;
        if (new_root != root) {
                if (WARN_ON_ONCE(root))
-                       mutex_unlock(&root->mutex);
+                       up_write(&root->rwsem);
                root = new_root;
-               mutex_lock(&root->mutex);
+               down_write(&root->rwsem);
        }
        return root;
 }
@@ -229,7 +229,7 @@ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct
 static inline void unlock_anon_vma_root(struct anon_vma *root)
 {
        if (root)
-               mutex_unlock(&root->mutex);
+               up_write(&root->rwsem);
 }
 
 /*
@@ -306,7 +306,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
        get_anon_vma(anon_vma->root);
        /* Mark this anon_vma as the one where our new (COWed) pages go. */
        vma->anon_vma = anon_vma;
-       anon_vma_lock(anon_vma);
+       anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
        anon_vma_unlock(anon_vma);
 
@@ -349,7 +349,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
        /*
         * Iterate the list once more, it now only contains empty and unlinked
         * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
-        * needing to acquire the anon_vma->root->mutex.
+        * needing to write-acquire the anon_vma->root->rwsem.
         */
        list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;
@@ -365,7 +365,7 @@ static void anon_vma_ctor(void *data)
 {
        struct anon_vma *anon_vma = data;
 
-       mutex_init(&anon_vma->mutex);
+       init_rwsem(&anon_vma->rwsem);
        atomic_set(&anon_vma->refcount, 0);
        anon_vma->rb_root = RB_ROOT;
 }
@@ -442,7 +442,7 @@ out:
  * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
  * reference like with page_get_anon_vma() and then block on the mutex.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_lock_anon_vma_read(struct page *page)
 {
        struct anon_vma *anon_vma = NULL;
        struct anon_vma *root_anon_vma;
@@ -457,14 +457,14 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        root_anon_vma = ACCESS_ONCE(anon_vma->root);
-       if (mutex_trylock(&root_anon_vma->mutex)) {
+       if (down_read_trylock(&root_anon_vma->rwsem)) {
                /*
                 * If the page is still mapped, then this anon_vma is still
                 * its anon_vma, and holding the mutex ensures that it will
                 * not go away, see anon_vma_free().
                 */
                if (!page_mapped(page)) {
-                       mutex_unlock(&root_anon_vma->mutex);
+                       up_read(&root_anon_vma->rwsem);
                        anon_vma = NULL;
                }
                goto out;
@@ -484,15 +484,15 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 
        /* we pinned the anon_vma, its safe to sleep */
        rcu_read_unlock();
-       anon_vma_lock(anon_vma);
+       anon_vma_lock_read(anon_vma);
 
        if (atomic_dec_and_test(&anon_vma->refcount)) {
                /*
                 * Oops, we held the last refcount, release the lock
                 * and bail -- can't simply use put_anon_vma() because
-                * we'll deadlock on the anon_vma_lock() recursion.
+                * we'll deadlock on the anon_vma_lock_write() recursion.
                 */
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_read(anon_vma);
                __put_anon_vma(anon_vma);
                anon_vma = NULL;
        }
@@ -504,9 +504,9 @@ out:
        return anon_vma;
 }
 
-void page_unlock_anon_vma(struct anon_vma *anon_vma)
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 {
-       anon_vma_unlock(anon_vma);
+       anon_vma_unlock_read(anon_vma);
 }
 
 /*
@@ -744,7 +744,7 @@ static int page_referenced_anon(struct page *page,
        struct anon_vma_chain *avc;
        int referenced = 0;
 
-       anon_vma = page_lock_anon_vma(page);
+       anon_vma = page_lock_anon_vma_read(page);
        if (!anon_vma)
                return referenced;
 
@@ -766,7 +766,7 @@ static int page_referenced_anon(struct page *page,
                        break;
        }
 
-       page_unlock_anon_vma(anon_vma);
+       page_unlock_anon_vma_read(anon_vma);
        return referenced;
 }
 
@@ -1315,7 +1315,7 @@ out_mlock:
        /*
         * We need mmap_sem locking, Otherwise VM_LOCKED check makes
         * unstable result and race. Plus, We can't wait here because
-        * we now hold anon_vma->mutex or mapping->i_mmap_mutex.
+        * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
         * if trylock failed, the page remain in evictable lru and later
         * vmscan could retry to move the page to unevictable lru if the
         * page is actually mlocked.
@@ -1480,7 +1480,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
-       anon_vma = page_lock_anon_vma(page);
+       anon_vma = page_lock_anon_vma_read(page);
        if (!anon_vma)
                return ret;
 
@@ -1507,7 +1507,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
                        break;
        }
 
-       page_unlock_anon_vma(anon_vma);
+       page_unlock_anon_vma_read(anon_vma);
        return ret;
 }
 
@@ -1702,7 +1702,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
        int ret = SWAP_AGAIN;
 
        /*
-        * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+        * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
         * because that depends on page_mapped(); but not all its usages
         * are holding mmap_sem. Users without mmap_sem are required to
         * take a reference count to prevent the anon_vma disappearing
@@ -1710,7 +1710,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
        anon_vma = page_anon_vma(page);
        if (!anon_vma)
                return ret;
-       anon_vma_lock(anon_vma);
+       anon_vma_lock_read(anon_vma);
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
@@ -1718,7 +1718,7 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
                if (ret != SWAP_AGAIN)
                        break;
        }
-       anon_vma_unlock(anon_vma);
+       anon_vma_unlock_read(anon_vma);
        return ret;
 }
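
The rmap.c conversion replaces anon_vma->mutex with an rwsem so multiple rmap walkers can hold the lock for reading at once; only writers to the interval tree still exclude each other. A pthread sketch of the read-side fast path of page_lock_anon_vma_read(), omitting the refcount fallback and the RCU details (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

/* anon_vma->root->rwsem modelled as a pthread rwlock. */
static pthread_rwlock_t root_rwsem = PTHREAD_RWLOCK_INITIALIZER;

/* Fast path of page_lock_anon_vma_read(): take the lock shared and
 * without sleeping; the refcount-based slow path is omitted here. */
static int lock_anon_vma_read_fast(void)
{
	return pthread_rwlock_tryrdlock(&root_rwsem) == 0;
}

int main(void)
{
	if (!lock_anon_vma_read_fast())
		return 1;

	/* Under the old mutex a second walker would block; with an
	 * rwsem a concurrent reader gets straight in. */
	if (pthread_rwlock_tryrdlock(&root_rwsem) == 0) {
		printf("two rmap walkers hold the lock for reading\n");
		pthread_rwlock_unlock(&root_rwsem);
	}
	pthread_rwlock_unlock(&root_rwsem);
	return 0;
}
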
 
index df14808..9800306 100644 (file)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -774,10 +774,20 @@ const char * const vmstat_text[] = {
 
        "pgrotated",
 
+#ifdef CONFIG_NUMA_BALANCING
+       "numa_pte_updates",
+       "numa_hint_faults",
+       "numa_hint_faults_local",
+       "numa_pages_migrated",
+#endif
+#ifdef CONFIG_MIGRATION
+       "pgmigrate_success",
+       "pgmigrate_fail",
+#endif
 #ifdef CONFIG_COMPACTION
-       "compact_blocks_moved",
-       "compact_pages_moved",
-       "compact_pagemigrate_failed",
+       "compact_migrate_scanned",
+       "compact_free_scanned",
+       "compact_isolated",
        "compact_stall",
        "compact_fail",
        "compact_success",
index 8aa4b11..0a69d07 100644 (file)
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -259,20 +259,16 @@ static int __init init_dns_resolver(void)
        if (!cred)
                return -ENOMEM;
 
-       keyring = key_alloc(&key_type_keyring, ".dns_resolver",
-                           GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
-                           (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-                           KEY_USR_VIEW | KEY_USR_READ,
-                           KEY_ALLOC_NOT_IN_QUOTA);
+       keyring = keyring_alloc(".dns_resolver",
+                               GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
+                               (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+                               KEY_USR_VIEW | KEY_USR_READ,
+                               KEY_ALLOC_NOT_IN_QUOTA, NULL);
        if (IS_ERR(keyring)) {
                ret = PTR_ERR(keyring);
                goto failed_put_cred;
        }
 
-       ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
-       if (ret < 0)
-               goto failed_put_key;
-
        ret = register_key_type(&key_type_dns_resolver);
        if (ret < 0)
                goto failed_put_key;
@@ -304,3 +300,4 @@ static void __exit exit_dns_resolver(void)
 module_init(init_dns_resolver)
 module_exit(exit_dns_resolver)
 MODULE_LICENSE("GPL");
+
index a15c9da..8fb7c7b 100644 (file)
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -854,13 +854,13 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
        /* if the client doesn't provide, decide on the permissions we want */
        if (perm == KEY_PERM_UNDEF) {
                perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
-               perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+               perm |= KEY_USR_VIEW;
 
                if (ktype->read)
-                       perm |= KEY_POS_READ | KEY_USR_READ;
+                       perm |= KEY_POS_READ;
 
                if (ktype == &key_type_keyring || ktype->update)
-                       perm |= KEY_USR_WRITE;
+                       perm |= KEY_POS_WRITE;
        }
 
        /* allocate a new key */
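
The effect of the new defaults in key_create_or_update() is visible from userspace: keys now default to possessor permissions plus KEY_USR_VIEW instead of the broader KEY_USR_* grants. A libkeyutils check (link with -lkeyutils); the permission mask is the fourth ';'-separated field of the description:

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *desc;
	key_serial_t key = add_key("user", "demo:perm", "payload", 7,
				   KEY_SPEC_SESSION_KEYRING);

	if (key < 0) {
		perror("add_key");
		return 1;
	}
	if (keyctl_describe_alloc(key, &desc) < 0) {
		perror("keyctl_describe_alloc");
		return 1;
	}
	/* Format: type;uid;gid;perm;description -- with this change the
	 * perm field reads e.g. 3f010000 (possessor-all + user view). */
	printf("%s\n", desc);
	free(desc);
	return 0;
}
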
index 5d34b4e..4b5c948 100644 (file)
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1132,12 +1132,12 @@ long keyctl_instantiate_key_iov(key_serial_t id,
        ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
                                    ARRAY_SIZE(iovstack), iovstack, &iov);
        if (ret < 0)
-               return ret;
+               goto err;
        if (ret == 0)
                goto no_payload_free;
 
        ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-
+err:
        if (iov != iovstack)
                kfree(iov);
        return ret;
@@ -1495,7 +1495,8 @@ long keyctl_session_to_parent(void)
                goto error_keyring;
        newwork = &cred->rcu;
 
-       cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
+       cred->session_keyring = key_ref_to_ptr(keyring_r);
+       keyring_r = NULL;
        init_task_work(newwork, key_change_session_keyring);
 
        me = current;
@@ -1519,7 +1520,7 @@ long keyctl_session_to_parent(void)
        mycred = current_cred();
        pcred = __task_cred(parent);
        if (mycred == pcred ||
-           mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
+           mycred->session_keyring == pcred->session_keyring) {
                ret = 0;
                goto unlock;
        }
@@ -1535,9 +1536,9 @@ long keyctl_session_to_parent(void)
                goto unlock;
 
        /* the keyrings must have the same UID */
-       if ((pcred->tgcred->session_keyring &&
-            !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) ||
-           !uid_eq(mycred->tgcred->session_keyring->uid, mycred->euid))
+       if ((pcred->session_keyring &&
+            !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+           !uid_eq(mycred->session_keyring->uid, mycred->euid))
                goto unlock;
 
        /* cancel an already pending keyring replacement */
index 6e42df1..6ece7f2 100644 (file)
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -257,17 +257,14 @@ error:
  * Allocate a keyring and link into the destination keyring.
  */
 struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
-                         const struct cred *cred, unsigned long flags,
-                         struct key *dest)
+                         const struct cred *cred, key_perm_t perm,
+                         unsigned long flags, struct key *dest)
 {
        struct key *keyring;
        int ret;
 
        keyring = key_alloc(&key_type_keyring, description,
-                           uid, gid, cred,
-                           (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
-                           flags);
-
+                           uid, gid, cred, perm, flags);
        if (!IS_ERR(keyring)) {
                ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
                if (ret < 0) {
@@ -278,6 +275,7 @@ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
 
        return keyring;
 }
+EXPORT_SYMBOL(keyring_alloc);
 
 /**
  * keyring_search_aux - Search a keyring tree for a key matching some criteria
index 86468f3..58dfe08 100644 (file)
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -45,10 +45,12 @@ int install_user_keyrings(void)
        struct user_struct *user;
        const struct cred *cred;
        struct key *uid_keyring, *session_keyring;
+       key_perm_t user_keyring_perm;
        char buf[20];
        int ret;
        uid_t uid;
 
+       user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
        cred = current_cred();
        user = cred->user;
        uid = from_kuid(cred->user_ns, user->uid);
@@ -73,8 +75,8 @@ int install_user_keyrings(void)
                uid_keyring = find_keyring_by_name(buf, true);
                if (IS_ERR(uid_keyring)) {
                        uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
-                                                   cred, KEY_ALLOC_IN_QUOTA,
-                                                   NULL);
+                                                   cred, user_keyring_perm,
+                                                   KEY_ALLOC_IN_QUOTA, NULL);
                        if (IS_ERR(uid_keyring)) {
                                ret = PTR_ERR(uid_keyring);
                                goto error;
@@ -89,7 +91,8 @@ int install_user_keyrings(void)
                if (IS_ERR(session_keyring)) {
                        session_keyring =
                                keyring_alloc(buf, user->uid, INVALID_GID,
-                                             cred, KEY_ALLOC_IN_QUOTA, NULL);
+                                             cred, user_keyring_perm,
+                                             KEY_ALLOC_IN_QUOTA, NULL);
                        if (IS_ERR(session_keyring)) {
                                ret = PTR_ERR(session_keyring);
                                goto error_release;
@@ -130,6 +133,7 @@ int install_thread_keyring_to_cred(struct cred *new)
        struct key *keyring;
 
        keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+                               KEY_POS_ALL | KEY_USR_VIEW,
                                KEY_ALLOC_QUOTA_OVERRUN, NULL);
        if (IS_ERR(keyring))
                return PTR_ERR(keyring);
@@ -170,27 +174,18 @@ static int install_thread_keyring(void)
 int install_process_keyring_to_cred(struct cred *new)
 {
        struct key *keyring;
-       int ret;
 
-       if (new->tgcred->process_keyring)
+       if (new->process_keyring)
                return -EEXIST;
 
-       keyring = keyring_alloc("_pid", new->uid, new->gid,
-                               new, KEY_ALLOC_QUOTA_OVERRUN, NULL);
+       keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+                               KEY_POS_ALL | KEY_USR_VIEW,
+                               KEY_ALLOC_QUOTA_OVERRUN, NULL);
        if (IS_ERR(keyring))
                return PTR_ERR(keyring);
 
-       spin_lock_irq(&new->tgcred->lock);
-       if (!new->tgcred->process_keyring) {
-               new->tgcred->process_keyring = keyring;
-               keyring = NULL;
-               ret = 0;
-       } else {
-               ret = -EEXIST;
-       }
-       spin_unlock_irq(&new->tgcred->lock);
-       key_put(keyring);
-       return ret;
+       new->process_keyring = keyring;
+       return 0;
 }
 
 /*
@@ -231,11 +226,12 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
        /* create an empty session keyring */
        if (!keyring) {
                flags = KEY_ALLOC_QUOTA_OVERRUN;
-               if (cred->tgcred->session_keyring)
+               if (cred->session_keyring)
                        flags = KEY_ALLOC_IN_QUOTA;
 
-               keyring = keyring_alloc("_ses", cred->uid, cred->gid,
-                                       cred, flags, NULL);
+               keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+                                       KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+                                       flags, NULL);
                if (IS_ERR(keyring))
                        return PTR_ERR(keyring);
        } else {
@@ -243,17 +239,11 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
        }
 
        /* install the keyring */
-       spin_lock_irq(&cred->tgcred->lock);
-       old = cred->tgcred->session_keyring;
-       rcu_assign_pointer(cred->tgcred->session_keyring, keyring);
-       spin_unlock_irq(&cred->tgcred->lock);
-
-       /* we're using RCU on the pointer, but there's no point synchronising
-        * on it if it didn't previously point to anything */
-       if (old) {
-               synchronize_rcu();
+       old = cred->session_keyring;
+       rcu_assign_pointer(cred->session_keyring, keyring);
+
+       if (old)
                key_put(old);
-       }
 
        return 0;
 }
@@ -368,9 +358,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
        }
 
        /* search the process keyring second */
-       if (cred->tgcred->process_keyring) {
+       if (cred->process_keyring) {
                key_ref = keyring_search_aux(
-                       make_key_ref(cred->tgcred->process_keyring, 1),
+                       make_key_ref(cred->process_keyring, 1),
                        cred, type, description, match, no_state_check);
                if (!IS_ERR(key_ref))
                        goto found;
@@ -389,12 +379,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
        }
 
        /* search the session keyring */
-       if (cred->tgcred->session_keyring) {
+       if (cred->session_keyring) {
                rcu_read_lock();
                key_ref = keyring_search_aux(
-                       make_key_ref(rcu_dereference(
-                                            cred->tgcred->session_keyring),
-                                    1),
+                       make_key_ref(rcu_dereference(cred->session_keyring), 1),
                        cred, type, description, match, no_state_check);
                rcu_read_unlock();
 
@@ -564,7 +552,7 @@ try_again:
                break;
 
        case KEY_SPEC_PROCESS_KEYRING:
-               if (!cred->tgcred->process_keyring) {
+               if (!cred->process_keyring) {
                        if (!(lflags & KEY_LOOKUP_CREATE))
                                goto error;
 
@@ -576,13 +564,13 @@ try_again:
                        goto reget_creds;
                }
 
-               key = cred->tgcred->process_keyring;
+               key = cred->process_keyring;
                atomic_inc(&key->usage);
                key_ref = make_key_ref(key, 1);
                break;
 
        case KEY_SPEC_SESSION_KEYRING:
-               if (!cred->tgcred->session_keyring) {
+               if (!cred->session_keyring) {
                        /* always install a session keyring upon access if one
                         * doesn't exist yet */
                        ret = install_user_keyrings();
@@ -597,7 +585,7 @@ try_again:
                        if (ret < 0)
                                goto error;
                        goto reget_creds;
-               } else if (cred->tgcred->session_keyring ==
+               } else if (cred->session_keyring ==
                           cred->user->session_keyring &&
                           lflags & KEY_LOOKUP_CREATE) {
                        ret = join_session_keyring(NULL);
@@ -607,7 +595,7 @@ try_again:
                }
 
                rcu_read_lock();
-               key = rcu_dereference(cred->tgcred->session_keyring);
+               key = rcu_dereference(cred->session_keyring);
                atomic_inc(&key->usage);
                rcu_read_unlock();
                key_ref = make_key_ref(key, 1);
@@ -767,12 +755,6 @@ long join_session_keyring(const char *name)
        struct key *keyring;
        long ret, serial;
 
-       /* only permit this if there's a single thread in the thread group -
-        * this avoids us having to adjust the creds on all threads and risking
-        * ENOMEM */
-       if (!current_is_single_threaded())
-               return -EMLINK;
-
        new = prepare_creds();
        if (!new)
                return -ENOMEM;
@@ -784,7 +766,7 @@ long join_session_keyring(const char *name)
                if (ret < 0)
                        goto error;
 
-               serial = new->tgcred->session_keyring->serial;
+               serial = new->session_keyring->serial;
                ret = commit_creds(new);
                if (ret == 0)
                        ret = serial;
@@ -798,8 +780,10 @@ long join_session_keyring(const char *name)
        keyring = find_keyring_by_name(name, false);
        if (PTR_ERR(keyring) == -ENOKEY) {
                /* not found - try and create a new one */
-               keyring = keyring_alloc(name, old->uid, old->gid, old,
-                                       KEY_ALLOC_IN_QUOTA, NULL);
+               keyring = keyring_alloc(
+                       name, old->uid, old->gid, old,
+                       KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+                       KEY_ALLOC_IN_QUOTA, NULL);
                if (IS_ERR(keyring)) {
                        ret = PTR_ERR(keyring);
                        goto error2;
@@ -807,6 +791,9 @@ long join_session_keyring(const char *name)
        } else if (IS_ERR(keyring)) {
                ret = PTR_ERR(keyring);
                goto error2;
+       } else if (keyring == new->session_keyring) {
+               ret = 0;
+               goto error2;
        }
 
        /* we've got a keyring - now to install it */
@@ -863,8 +850,7 @@ void key_change_session_keyring(struct callback_head *twork)
 
        new->jit_keyring        = old->jit_keyring;
        new->thread_keyring     = key_get(old->thread_keyring);
-       new->tgcred->tgid       = old->tgcred->tgid;
-       new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
+       new->process_keyring    = key_get(old->process_keyring);
 
        security_transfer_creds(new, old);
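
With the thread-group credential removal above, join_session_keyring() no longer refuses multithreaded callers with -EMLINK, and re-joining the keyring that is already the session keyring short-circuits through the new `keyring == new->session_keyring` branch. A libkeyutils demonstration (link with -lkeyutils); the second call is expected to return 0 from that branch:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t first = keyctl_join_session_keyring("demo-session");
	key_serial_t again = keyctl_join_session_keyring("demo-session");

	if (first < 0 || again < 0) {
		perror("keyctl_join_session_keyring");
		return 1;
	}
	/* "again" should be 0: we already owned that session keyring. */
	printf("joined %d, rejoin returned %d\n", first, again);
	return 0;
}
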
 
index 66e2118..4bd6bdb 100644 (file)
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -126,6 +126,7 @@ static int call_sbin_request_key(struct key_construction *cons,
 
        cred = get_current_cred();
        keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+                               KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                                KEY_ALLOC_QUOTA_OVERRUN, NULL);
        put_cred(cred);
        if (IS_ERR(keyring)) {
@@ -150,12 +151,12 @@ static int call_sbin_request_key(struct key_construction *cons,
                cred->thread_keyring ? cred->thread_keyring->serial : 0);
 
        prkey = 0;
-       if (cred->tgcred->process_keyring)
-               prkey = cred->tgcred->process_keyring->serial;
+       if (cred->process_keyring)
+               prkey = cred->process_keyring->serial;
        sprintf(keyring_str[1], "%d", prkey);
 
        rcu_read_lock();
-       session = rcu_dereference(cred->tgcred->session_keyring);
+       session = rcu_dereference(cred->session_keyring);
        if (!session)
                session = cred->user->session_keyring;
        sskey = session->serial;
@@ -297,14 +298,14 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
                                break;
 
                case KEY_REQKEY_DEFL_PROCESS_KEYRING:
-                       dest_keyring = key_get(cred->tgcred->process_keyring);
+                       dest_keyring = key_get(cred->process_keyring);
                        if (dest_keyring)
                                break;
 
                case KEY_REQKEY_DEFL_SESSION_KEYRING:
                        rcu_read_lock();
                        dest_keyring = key_get(
-                               rcu_dereference(cred->tgcred->session_keyring));
+                               rcu_dereference(cred->session_keyring));
                        rcu_read_unlock();
 
                        if (dest_keyring)
@@ -347,6 +348,7 @@ static int construct_alloc_key(struct key_type *type,
        const struct cred *cred = current_cred();
        unsigned long prealloc;
        struct key *key;
+       key_perm_t perm;
        key_ref_t key_ref;
        int ret;
 
@@ -355,8 +357,15 @@ static int construct_alloc_key(struct key_type *type,
        *_key = NULL;
        mutex_lock(&user->cons_lock);
 
+       perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+       perm |= KEY_USR_VIEW;
+       if (type->read)
+               perm |= KEY_POS_READ;
+       if (type == &key_type_keyring || type->update)
+               perm |= KEY_POS_WRITE;
+
        key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
-                       KEY_POS_ALL, flags);
+                       perm, flags);
        if (IS_ERR(key))
                goto alloc_failed;
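
construct_alloc_key() now computes the same possessor-based default mask that key_create_or_update() uses, instead of handing keys under construction a bare KEY_POS_ALL. That path is driven by request_key(2); a minimal trigger, which is expected to fail with ENOKEY unless a request-key(8) handler instantiates the key:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t key = request_key("user", "demo:construct", NULL,
				       KEY_SPEC_SESSION_KEYRING);

	if (key < 0)
		perror("request_key");	/* ENOKEY without a handler */
	else
		printf("key %d instantiated with possessor perms\n", key);
	return 0;
}
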
 
index 603b087..e69de9c 100644 (file)
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -1,6 +1,10 @@
 config SECURITY_SMACK
        bool "Simplified Mandatory Access Control Kernel Support"
-       depends on NETLABEL && SECURITY_NETWORK
+       depends on NET
+       depends on INET
+       depends on SECURITY
+       select NETLABEL
+       select SECURITY_NETWORK
        default n
        help
          This selects the Simplified Mandatory Access Control Kernel.
index 99929a5..76a5dca 100644 (file)
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2063,6 +2063,19 @@ static const struct file_operations smk_revoke_subj_ops = {
        .llseek         = generic_file_llseek,
 };
 
+static struct kset *smackfs_kset;
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+       smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
+       if (!smackfs_kset)
+               return -ENOMEM;
+       return 0;
+}
+
 /**
  * smk_fill_super - fill the /smackfs superblock
  * @sb: the empty superblock
@@ -2183,6 +2196,10 @@ static int __init init_smk_fs(void)
        if (!security_module_enable(&smack_ops))
                return 0;
 
+       err = smk_init_sysfs();
+       if (err)
+               printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
        err = register_filesystem(&smk_fs_type);
        if (!err) {
                smackfs_mount = kern_mount(&smk_fs_type);
index b4c2984..2663145 100644 (file)
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -17,6 +17,7 @@
 #include <linux/ptrace.h>
 #include <linux/prctl.h>
 #include <linux/ratelimit.h>
+#include <linux/workqueue.h>
 
 #define YAMA_SCOPE_DISABLED    0
 #define YAMA_SCOPE_RELATIONAL  1
@@ -29,12 +30,37 @@ static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
 struct ptrace_relation {
        struct task_struct *tracer;
        struct task_struct *tracee;
+       bool invalid;
        struct list_head node;
+       struct rcu_head rcu;
 };
 
 static LIST_HEAD(ptracer_relations);
 static DEFINE_SPINLOCK(ptracer_relations_lock);
 
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ *
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+       struct ptrace_relation *relation;
+
+       spin_lock(&ptracer_relations_lock);
+       rcu_read_lock();
+       list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+               if (relation->invalid) {
+                       list_del_rcu(&relation->node);
+                       kfree_rcu(relation, rcu);
+               }
+       }
+       rcu_read_unlock();
+       spin_unlock(&ptracer_relations_lock);
+}
+
 /**
  * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
  * @tracer: the task_struct of the process doing the ptrace
@@ -48,32 +74,34 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
 static int yama_ptracer_add(struct task_struct *tracer,
                            struct task_struct *tracee)
 {
-       int rc = 0;
-       struct ptrace_relation *added;
-       struct ptrace_relation *entry, *relation = NULL;
+       struct ptrace_relation *relation, *added;
 
        added = kmalloc(sizeof(*added), GFP_KERNEL);
        if (!added)
                return -ENOMEM;
 
-       spin_lock_bh(&ptracer_relations_lock);
-       list_for_each_entry(entry, &ptracer_relations, node)
-               if (entry->tracee == tracee) {
-                       relation = entry;
-                       break;
+       added->tracee = tracee;
+       added->tracer = tracer;
+       added->invalid = false;
+
+       spin_lock(&ptracer_relations_lock);
+       rcu_read_lock();
+       list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+               if (relation->invalid)
+                       continue;
+               if (relation->tracee == tracee) {
+                       list_replace_rcu(&relation->node, &added->node);
+                       kfree_rcu(relation, rcu);
+                       goto out;
                }
-       if (!relation) {
-               relation = added;
-               relation->tracee = tracee;
-               list_add(&relation->node, &ptracer_relations);
        }
-       relation->tracer = tracer;
 
-       spin_unlock_bh(&ptracer_relations_lock);
-       if (added != relation)
-               kfree(added);
+       list_add_rcu(&added->node, &ptracer_relations);
 
-       return rc;
+out:
+       rcu_read_unlock();
+       spin_unlock(&ptracer_relations_lock);
+       return 0;
 }
 
 /**
@@ -84,16 +112,23 @@ static int yama_ptracer_add(struct task_struct *tracer,
 static void yama_ptracer_del(struct task_struct *tracer,
                             struct task_struct *tracee)
 {
-       struct ptrace_relation *relation, *safe;
+       struct ptrace_relation *relation;
+       bool marked = false;
 
-       spin_lock_bh(&ptracer_relations_lock);
-       list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
+       rcu_read_lock();
+       list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+               if (relation->invalid)
+                       continue;
                if (relation->tracee == tracee ||
                    (tracer && relation->tracer == tracer)) {
-                       list_del(&relation->node);
-                       kfree(relation);
+                       relation->invalid = true;
+                       marked = true;
                }
-       spin_unlock_bh(&ptracer_relations_lock);
+       }
+       rcu_read_unlock();
+
+       if (marked)
+               schedule_work(&yama_relation_work);
 }
 
 /**
@@ -217,21 +252,22 @@ static int ptracer_exception_found(struct task_struct *tracer,
        struct task_struct *parent = NULL;
        bool found = false;
 
-       spin_lock_bh(&ptracer_relations_lock);
        rcu_read_lock();
        if (!thread_group_leader(tracee))
                tracee = rcu_dereference(tracee->group_leader);
-       list_for_each_entry(relation, &ptracer_relations, node)
+       list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+               if (relation->invalid)
+                       continue;
                if (relation->tracee == tracee) {
                        parent = relation->tracer;
                        found = true;
                        break;
                }
+       }
 
        if (found && (parent == NULL || task_is_descendant(parent, tracer)))
                rc = 1;
        rcu_read_unlock();
-       spin_unlock_bh(&ptracer_relations_lock);
 
        return rc;
 }
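
The Yama exception list walked above is filled from userspace via prctl(PR_SET_PTRACER); after this change, additions go through list_add_rcu() and stale entries are reaped by the yama_relation_work item instead of being freed under the spinlock. A minimal caller, which fails with EINVAL if Yama is not enabled:

#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61	/* "Yama" */
#endif

int main(void)
{
	/* Allow our parent to ptrace us: inserts a ptrace_relation. */
	if (prctl(PR_SET_PTRACER, (unsigned long)getppid(), 0, 0, 0)) {
		perror("prctl(PR_SET_PTRACER)");
		return 1;
	}
	printf("pid %d may now ptrace pid %d\n", getppid(), getpid());

	/* Clearing with 0 marks the entry invalid; the work item frees it. */
	prctl(PR_SET_PTRACER, 0, 0, 0, 0);
	return 0;
}
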